diff --git a/tests/functional/test_bulk_remote_changes.py b/tests/functional/test_bulk_remote_changes.py
new file mode 100644
index 0000000000..cbd6e8df02
--- /dev/null
+++ b/tests/functional/test_bulk_remote_changes.py
@@ -0,0 +1,161 @@
+"""
+Technical Background: The GetChildren API can throw errors
+                      due to network issues or server load.
+                      The GetChildren API is also called when processing remote events.
+
+Issue: When processing remote events, an error in the GetChildren API
+       call (for a folder) results in Drive failing to process the
+       remaining remote events in the queue.
+
+Fix: Handle the GetChildren API error gracefully and re-queue the
+     same folder for another remote scan.
+
+Testing: This issue can be tested by simulating network errors of the API
+         using a mock framework:
+         1. Emulate the GetChildren API error by mocking the
+            Remote.get_fs_children method
+         2. The mocked method will raise an exception on demand
+            to simulate server-side / network errors
+
+Note: searching for the following regular expression in the log file
+      will filter the manual test case:
+      STEP:|VERIFY:|Error:
+"""
+
+from logging import getLogger
+from time import sleep
+from unittest.mock import patch
+
+from nuxeo.utils import version_lt
+from requests import ConnectionError
+
+from nxdrive.client.remote_client import Remote
+from nxdrive.objects import RemoteFileInfo
+
+from .conftest import TEST_DEFAULT_DELAY, TwoUsersTest
+
+log = getLogger(__name__)
+
+
+class TestBulkRemoteChanges(TwoUsersTest):
+    """
+    Test bulk remote changes when a network error happens in get_children_info(),
+    simulating the error when required. The test_many_changes method makes
+    server-side changes, simulates a GetChildren API error and still verifies
+    that all remote changes are successfully synced.
+    """
+
+    def test_many_changes(self):
+        """
+        Objective: make a lot of remote changes (including a modified folder)
+        and wait for nuxeo-drive to successfully sync, even if a network
+        error happens.
+
+        1. Configure drive and wait for sync
+        2. Create 3 folders: folder1, folder2 and shared
+        3. Create files inside the 3 folders: folder1/file1.txt, folder2/file2.txt,
+           shared/readme1.txt, shared/readme2.txt
+        4. Wait for the 3 folders and 4 files to sync to the local PC
+        5. Check that the 3 folders and 4 files are synced to the local PC
+        6. Trigger the simulation of a network error for the GetChildren API using
+           the mock (2 successive failures)
+        7. Do the following changes on the DM side, in this order:
+            I. Create 'folder1/sample1.txt'
+           II. Delete the 'shared' folder, and immediately restore it
+          III. Restore 'shared/readme1.txt'
+           IV. Create 'shared/readme3.txt'
+            V. Create 'folder2/sample2.txt'
+        8. Wait for remote changes to sync for the unaffected folders folder1 and folder2
+        9. Check that folder1/sample1.txt and folder2/sample2.txt are synced to the local PC
+        10. Sleep for two remote scan attempts (to compensate for the two network failures)
+        11. Check that the two files 'shared/readme1.txt' and 'shared/readme3.txt' are
+            synced to the local PC.
+        """
+        local = self.local_1
+        remote = self.remote_document_client_1
+        network_error = 2
+
+        self.engine_1.start()
+        self.wait_sync(wait_for_async=True)
+
+        # Create some folders on the server
+        folder1 = remote.make_folder(self.workspace, "folder1")
+        folder2 = remote.make_folder(self.workspace, "folder2")
+        shared = remote.make_folder(self.workspace, "shared")
+
+        remote.make_file(folder1, "file1.txt", content=b"This is a sample file1")
+        remote.make_file(folder2, "file2.txt", content=b"This is a sample file2")
+        readme1 = remote.make_file(
+            shared, "readme1.txt", content=b"This is a readme file"
+        )
+        remote.make_file(shared, "readme2.txt", content=b"This is a readme file")
+
+        self.wait_sync(wait_for_async=True)
+
+        assert local.exists("/folder1")
+        assert local.exists("/folder2")
+        assert local.exists("/shared")
+        assert local.exists("/folder1/file1.txt")
+        assert local.exists("/folder2/file2.txt")
+        assert local.exists("/shared/readme1.txt")
+        assert local.exists("/shared/readme2.txt")
+
+        # No `self` parameter here: the mock replaces the instance attribute
+        # directly, so it is called unbound and `self` comes from the closure.
+        def get_children_info(*args, **kwargs):
+            nonlocal network_error
+            if network_error > 0:
+                network_error -= 1
+                # Simulate a network error during the call to NuxeoDrive.GetChildren
+                raise ConnectionError(
+                    "Network error simulated for NuxeoDrive.GetChildren"
+                )
+            return Remote.get_fs_children(self.engine_1.remote, *args, **kwargs)
+
+        def mock_method_factory(original):
+            def wrapped_method(data):
+                data["canScrollDescendants"] = True
+                return original(data)
+
+            return wrapped_method
+
+        with patch.object(
+            remote, "get_children_info", new=get_children_info
+        ), patch.object(
+            RemoteFileInfo,
+            "from_dict",
+            wraps=mock_method_factory(RemoteFileInfo.from_dict),
+        ):
+            # Simulate a network error for the GetChildren API twice.
+            # This is to ensure Drive will eventually recover even after multiple
+            # failures of the GetChildren API.
+            remote.make_file(
+                folder1, "sample1.txt", content=b"This is another sample file1"
+            )
+            self.remote_2.register_as_root(shared)
+
+            # Delete folder 'shared'
+            remote.delete(shared)
+            self.wait_sync(wait_for_async=True)
+
+            # Restore folder 'shared' from trash
+            remote.undelete(shared)
+            if version_lt(remote.client.server_version, "10.2"):
+                remote.undelete(readme1)
+            self.wait_sync(wait_for_async=True)
+
+            remote.make_file(
+                shared, "readme3.txt", content=b"This is another shared file"
+            )
+            remote.make_file(
+                folder2, "sample2.txt", content=b"This is another sample file2"
+            )
+
+            self.wait_sync(wait_for_async=True)
+            assert local.exists("/folder2/sample2.txt")
+            assert local.exists("/folder1/sample1.txt")
+
+            # Although sync failed for one folder, GetChangeSummary will return
+            # zero events in successive calls. We need to wait two remote scans,
+            # so sleep for TEST_DEFAULT_DELAY * 2.
+            sleep(TEST_DEFAULT_DELAY * 2)
+            assert local.exists("/shared/readme1.txt")
+            assert local.exists("/shared/readme3.txt")
diff --git a/tests/functional/test_collection.py b/tests/functional/test_collection.py
new file mode 100644
index 0000000000..58b75f9f18
--- /dev/null
+++ b/tests/functional/test_collection.py
@@ -0,0 +1,69 @@
+"""
+from contextlib import suppress
+
+import pytest
+
+from .conftest import OneUserTest
+
+
+class TestCollection(OneUserTest):
+    @pytest.fixture(autouse=True)
+    def teardown(self):
+        yield
+
+        with suppress(Exception):
+            # Happened when the test fails at setup_method()
+            self.remote_document_client_1.delete(
+                self.collection["uid"], use_trash=False
+            )
+
+    def test_collection_synchronization(self):
+        remote = self.remote_1
+
+        # Remove synchronization root
+        remote.unregister_as_root(self.workspace)
+
+        # Create a document "Fiiile" in a folder "Test"
+        folder = self.remote_document_client_1.make_folder("/", "Test")
+        # Attach a file "abcde.txt" to the document
+        doc = self.remote_document_client_1.make_file_with_blob(
+            folder, "abcde.txt", b"abcde"
+        )
+
+        # Create a collection and add the document to it
+        self.collection = remote.execute(
+            command="Collection.Create",
+            name="CollectionA",
+            description="Test collection",
+        )
+        remote.execute(
+            command="Document.AddToCollection",
+            collection=self.collection["uid"],
+            input_obj=f"doc:{doc}",
+        )
+
+        # Register the collection as the synchronization root
+        remote.register_as_root(self.collection["uid"])
+
+        # Sync locally
+        self.engine_1.start()
+        self.wait_sync(wait_for_async=True)
+
+        # Get a client on the newly synchronized collection
+        local = self.get_local_client(self.local_nxdrive_folder_1 / "CollectionA")
+
+        # Check the attached file is here
+        assert local.exists("/abcde.txt")
+
+        # Attach a file "fghij.txt" to the document
+        # This should effectively replace the previous file
+        # since we did not specify another xpath than the main blob.
+        self.remote_document_client_1.attach_blob(doc, b"fghij", "fghij.txt")
+
+        # Sync locally
+        self.wait_sync(wait_for_async=True)
+
+        # Check the new attached file is here, and the previous isn't
+        assert local.exists("/fghij.txt")
+        assert not local.exists("/abcde.txt")
+"""
diff --git a/tests/functional/test_concurrent_synchronization.py b/tests/functional/test_concurrent_synchronization.py
new file mode 100644
index 0000000000..dfab260e4c
--- /dev/null
+++ b/tests/functional/test_concurrent_synchronization.py
@@ -0,0 +1,372 @@
+import time
+
+from nxdrive.constants import WINDOWS
+
+from .conftest import REMOTE_MODIFICATION_TIME_RESOLUTION, TwoUsersTest
+
+
+class TestConcurrentSynchronization(TwoUsersTest):
+    def create_docs(self, parent, number, name_pattern=None, delay=1.0):
+        return self.root_remote.execute(
+            command="NuxeoDrive.CreateTestDocuments",
+            input_obj=f"doc:{parent}",
+            namePattern=name_pattern,
+            number=number,
+            delay=int(delay * 1000),
+        )
+
+    def test_concurrent_file_access(self):
+        """Test update/deletion of a locally locked file.
+
+        This is to simulate downstream synchronization of a file opened (thus
+        locked) by any program under Windows, typically MS Word.
+        The file should be temporarily ignored and not prevent the synchronization
+        of other pending items.
+        Once the file is unlocked and the cooldown period is over, it should be
+        synchronized.
+        """
+        # Bind the server and root workspace
+        self.engine_1.start()
+        self.wait_sync(wait_for_async=True)
+
+        # Get local and remote clients
+        local = self.local_1
+        remote = self.remote_document_client_1
+
+        # Create files in the remote root workspace
+        uid = remote.make_file(
+            "/", "test_update.docx", content=b"Some content to update."
+        )
+        remote.make_file("/", "test_delete.docx", content=b"Some content to delete.")
+
+        # Launch first synchronization
+        self.wait_sync(wait_for_async=True)
+        assert local.exists("/test_update.docx")
+        assert local.exists("/test_delete.docx")
+
+        # Open locally synchronized files to lock them and generate a
+        # WindowsError when trying to update / delete them
+        file1_path = local.get_info("/test_update.docx").filepath
+        file2_path = local.get_info("/test_delete.docx").filepath
+        with open(file1_path, "rb"), open(file2_path, "rb"):
+            # Update / delete existing remote files and create a new remote file
+            # Wait for 1 second to make sure the file's last modification time
+            # will be different from the pair state's last remote update time
+            time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION)
+            remote.update_content("/test_update.docx", b"Updated content.")
+            remote.delete("/test_delete.docx")
+            remote.make_file("/", "other.docx", content=b"Other content.")
+
+            # Synchronize
+            self.wait_sync(
+                wait_for_async=True, enforce_errors=False, fail_if_timeout=False
+            )
+            if WINDOWS:
+                # As local files are locked, a WindowsError should occur during the
+                # local update process, therefore:
+                # - Opened local files should still exist and not have been
+                #   modified
+                # - Synchronization should not fail: doc pairs should be
+                #   temporarily ignored and other remote modifications should be
+                #   locally synchronized
+                assert local.exists("/test_update.docx")
+                assert (
+                    local.get_content("/test_update.docx") == b"Some content to update."
+                )
+                assert local.exists("/test_delete.docx")
+                assert (
+                    local.get_content("/test_delete.docx") == b"Some content to delete."
+                )
+                assert local.exists("/other.docx")
+                assert local.get_content("/other.docx") == b"Other content."
+
+                # Synchronize again
+                self.wait_sync(enforce_errors=False, fail_if_timeout=False)
+                # Temporarily ignored files should still be ignored as the delay
+                # (60 seconds by default) has not expired, nothing should have changed
+                assert local.exists("/test_update.docx")
+                assert (
+                    local.get_content("/test_update.docx") == b"Some content to update."
+                )
+                assert local.exists("/test_delete.docx")
+                assert (
+                    local.get_content("/test_delete.docx") == b"Some content to delete."
+                )
+
+        if WINDOWS:
+            # Cancel error delay to force retrying synchronization of pairs in error
+            self.queue_manager_1.requeue_errors()
+            self.wait_sync()
+
+            # Previously temporarily ignored files should be updated / deleted locally,
+            # the temporary download file should not be there anymore and there
+            # should be no pending items left
+        else:
+            assert not (self.engine_1.download_dir / uid).is_dir()
+
+        assert local.exists("/test_update.docx")
+        assert local.get_content("/test_update.docx") == b"Updated content."
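+        # The remote deletion must have been applied locally as well: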
+ assert not local.exists("/test_delete.docx") + + """ + def test_find_changes_with_many_doc_creations(self): + local = self.local_1 + + # Synchronize root workspace + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + assert not local.get_children_info("/") + + # List of children names to create + n_children = 5 + child_name_pattern = "child_%03d.txt" + children_names = [child_name_pattern % i for i in range(n_children)] + + # Create the children to synchronize on the remote server concurrently + # in a long running transaction + self.create_docs( + self.workspace, n_children, name_pattern=child_name_pattern, delay=0.5 + ) + + # Wait for the synchronizer thread to complete + self.wait_sync(wait_for_async=True) + + # Check that all the children creations where detected despite the + # creation transaction spanning longer than the individual audit + # query time ranges. + local_children_names = [c.name for c in local.get_children_info("/")] + local_children_names.sort() + assert local_children_names == children_names + """ + + """ + def test_delete_local_folder_2_clients(self): + # Get local clients for each device and remote client + local1 = self.local_1 + local2 = self.local_2 + remote = self.remote_document_client_1 + + # Check synchronization roots for drive1, + # there should be 1, the test workspace + sync_roots = remote.get_roots() + assert len(sync_roots) == 1 + assert sync_roots[0].name == self.workspace_title + + # Launch first synchronization on both devices + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Test workspace should be created locally on both devices + assert local1.exists("/") + assert local2.exists("/") + + # Make drive1 create a remote folder in the + # test workspace and a file inside this folder, + # then synchronize both devices + test_folder = remote.make_folder(self.workspace, "Test folder") + remote.make_file(test_folder, "test.odt", content=b"Some content.") + + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Test folder should be created locally on both devices + assert local1.exists("/Test folder") + assert local1.exists("/Test folder/test.odt") + assert local2.exists("/Test folder") + assert local2.exists("/Test folder/test.odt") + + # Delete Test folder locally on one of the devices + local1.delete("/Test folder") + assert not local1.exists("/Test folder") + + # Wait for synchronization engines to complete + # Wait for Windows delete and also async + self.wait_sync(wait_win=True, wait_for_async=True, wait_for_engine_2=True) + + # Test folder should be deleted on the server and on both devices + assert not remote.exists(test_folder) + assert not local1.exists("/Test folder") + assert not local2.exists("/Test folder") + """ + + """ + def test_delete_local_folder_delay_remote_changes_fetch(self): + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Launch first synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Test workspace should be created locally + assert local.exists("/") + + # Create a local folder in the test workspace and a file inside + # this folder, then synchronize + folder = local.make_folder("/", "Test folder") + local.make_file(folder, "test.odt", content=b"Some content.") + + self.wait_sync() + + # Test folder should be created remotely in the test workspace + assert remote.exists("/Test folder") + assert remote.exists("/Test 
folder/test.odt") + + # Delete Test folder locally before fetching remote changes, + # then synchronize + local.delete("/Test folder") + assert not local.exists("/Test folder") + + self.wait_sync() + + # Test folder should be deleted remotely in the test workspace. + # Even though fetching the remote changes will send + # 'documentCreated' events for Test folder and its child file + # as a result of the previous synchronization loop, since the folder + # will not have been renamed nor moved since last synchronization, + # its remote pair state will not be marked as 'modified', + # see Model.update_remote(). + # Thus the pair state will be ('deleted', 'synchronized'), resolved as + # 'locally_deleted'. + assert not remote.exists("Test folder") + + # Check Test folder has not been re-created locally + assert not local.exists("/Test folder") + """ + + def test_rename_local_folder(self): + # Get local and remote clients + local1 = self.local_1 + local2 = self.local_2 + + # Launch first synchronization + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Test workspace should be created locally + assert local1.exists("/") + assert local2.exists("/") + + # Create a local folder in the test workspace and a file inside + # this folder, then synchronize + local1.make_folder("/", "Test folder") + if WINDOWS: + # Too fast folder create-then-rename are not well handled + time.sleep(1) + local1.rename("/Test folder", "Renamed folder") + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + assert local1.exists("/Renamed folder") + assert local2.exists("/Renamed folder") + + """ + def test_delete_local_folder_update_remote_folder_property(self): + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Launch first synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Test workspace should be created locally + assert local.exists("/") + + # Create a local folder in the test workspace and a file inside + # this folder, then synchronize + folder = local.make_folder("/", "Test folder") + local.make_file(folder, "test.odt", content=b"Some content.") + + self.wait_sync() + + # Test folder should be created remotely in the test workspace + assert remote.exists("/Test folder") + assert remote.exists("/Test folder/test.odt") + + # Delete Test folder locally and remotely update one of its properties + # concurrently, then synchronize + self.engine_1.suspend() + local.delete("/Test folder") + assert not local.exists("/Test folder") + test_folder_ref = remote.check_ref("/Test folder") + # Wait for 1 second to make sure the folder's last modification time + # will be different from the pair state's last remote update time + time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) + remote.update( + test_folder_ref, properties={"dc:description": "Some description."} + ) + test_folder = remote.fetch(test_folder_ref) + assert test_folder["properties"]["dc:description"] == "Some description." + self.engine_1.resume() + + self.wait_sync(wait_for_async=True) + + # Test folder should be deleted remotely in the test workspace. 
+ assert not remote.exists("/Test folder") + + # Check Test folder has not been re-created locally + assert not local.exists("/Test folder") + """ + + """ + def test_update_local_file_content_update_remote_file_property(self): + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Launch first synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Test workspace should be created locally + assert local.exists("/") + + # Create a local file in the test workspace then synchronize + local.make_file("/", "test.odt", content=b"Some content.") + + self.wait_sync() + + # Test file should be created remotely in the test workspace + assert remote.exists("/test.odt") + + self.engine_1.queue_manager.suspend() + # Locally update the file content and remotely update one of its + # properties concurrently, then synchronize + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content("/test.odt", b"Updated content.") + assert local.get_content("/test.odt") == b"Updated content." + test_file_ref = remote.check_ref("/test.odt") + # Wait for 1 second to make sure the file's last modification time + # will be different from the pair state's last remote update time + time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) + remote.update(test_file_ref, properties={"dc:description": "Some description."}) + test_file = remote.fetch(test_file_ref) + assert test_file["properties"]["dc:description"] == "Some description." + time.sleep(TEST_DEFAULT_DELAY) + self.engine_1.queue_manager.resume() + + self.wait_sync(wait_for_async=True) + + # Test file should be updated remotely in the test workspace, + # and no conflict should be detected. + # Even though fetching the remote changes will send a + # 'documentModified' event for the test file as a result of its + # dc:description property update, since the file will not have been + # renamed nor moved and its content not modified since last + # synchronization, its remote pair state will not be marked as + # 'modified', see Model.update_remote(). + # Thus the pair state will be ('modified', 'synchronized'), resolved as + # 'locally_modified'. + assert remote.exists("/test.odt") + assert remote.get_content("/test.odt") == b"Updated content." + test_file = remote.fetch(test_file_ref) + assert test_file["properties"]["dc:description"] == "Some description." + assert len(remote.get_children_info(self.workspace)) == 1 + + # Check that the content of the test file has not changed + assert local.exists("/test.odt") + assert local.get_content("/test.odt") == b"Updated content." 
+ assert len(local.get_children_info("/")) == 1 + """ diff --git a/tests/functional/test_conflicts.py b/tests/functional/test_conflicts.py new file mode 100644 index 0000000000..e7ef4e5b8b --- /dev/null +++ b/tests/functional/test_conflicts.py @@ -0,0 +1,347 @@ +import shutil +import time + +import pytest + +from .conftest import OS_STAT_MTIME_RESOLUTION, SYNC_ROOT_FAC_ID, TwoUsersTest + + +class TestConflicts(TwoUsersTest): + def setUp(self): + self.workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + self.file_id = self.remote_1.make_file( + self.workspace_id, "test.txt", content=b"Some content" + ).uid + self.get_remote_state = self.engine_1.dao.get_normal_state_from_remote + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert self.local_1.exists("/test.txt") + + def test_self_conflict(self): + remote = self.remote_1 + local = self.local_1 + # Update content on both sides by the same user, remote last + remote.update_content(self.file_id, b"Remote update") + local.update_content("/test.txt", b"Local update") + self.wait_sync(wait_for_async=True) + + assert len(local.get_children_info("/")) == 1 + assert local.exists("/test.txt") + assert local.get_content("/test.txt") == b"Local update" + + remote_children = remote.get_fs_children(self.workspace_id) + assert len(remote_children) == 1 + assert remote_children[0].uid == self.file_id + assert remote_children[0].name == "test.txt" + assert remote.get_content(remote_children[0].uid) == b"Remote update" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + # Update content on both sides by the same user, local last + remote.update_content(self.file_id, b"Remote update 2") + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content("/test.txt", b"Local update 2") + self.wait_sync(wait_for_async=True) + + assert len(local.get_children_info("/")) == 1 + assert local.exists("/test.txt") + assert local.get_content("/test.txt") == b"Local update 2" + + remote_children = remote.get_fs_children(self.workspace_id) + assert len(remote_children) == 1 + assert remote_children[0].uid == self.file_id + assert remote_children[0].name == "test.txt" + assert remote.get_content(remote_children[0].uid) == b"Remote update 2" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + def test_conflict_renamed_modified(self): + local = self.local_1 + remote = self.remote_2 + + # Update content on both sides by different users, remote last + time.sleep(OS_STAT_MTIME_RESOLUTION) + # Race condition is still possible + remote.update_content(self.file_id, b"Remote update") + remote.rename(self.file_id, "plop.txt") + local.update_content("/test.txt", b"Local update") + self.wait_sync(wait_for_async=True) + + assert remote.get_content(self.file_id) == b"Remote update" + assert local.get_content("/test.txt") == b"Local update" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + """ + def test_resolve_local_renamed_modified(self): + remote = self.remote_2 + + self.test_conflict_renamed_modified() + # Resolve to local file + pair = self.get_remote_state(self.file_id) + assert pair + self.engine_1.resolve_with_local(pair.id) + self.wait_sync(wait_for_async=True) + + remote_children = remote.get_fs_children(self.workspace_id) + assert len(remote_children) == 1 + assert remote_children[0].uid == self.file_id + assert remote_children[0].name == "test.txt" + assert remote.get_content(remote_children[0].uid) == b"Local update" + """ + + def test_real_conflict(self): + local = self.local_1 + remote 
= self.remote_2
+
+        # Update content on both sides by different users, remote last
+        time.sleep(OS_STAT_MTIME_RESOLUTION)
+        # Race condition is still possible
+        remote.update_content(self.file_id, b"Remote update")
+        local.update_content("/test.txt", b"Local update")
+        self.wait_sync(wait_for_async=True)
+
+        assert remote.get_content(self.file_id) == b"Remote update"
+        assert local.get_content("/test.txt") == b"Local update"
+        assert self.get_remote_state(self.file_id).pair_state == "conflicted"
+
+        # Update content on both sides by different users, local last
+        remote.update_content(self.file_id, b"Remote update 2")
+        time.sleep(OS_STAT_MTIME_RESOLUTION)
+        local.update_content("/test.txt", b"Local update 2")
+        self.wait_sync(wait_for_async=True)
+
+        assert remote.get_content(self.file_id) == b"Remote update 2"
+        assert local.get_content("/test.txt") == b"Local update 2"
+        assert self.get_remote_state(self.file_id).pair_state == "conflicted"
+
+    def test_resolve_local(self):
+        self.test_real_conflict()
+        # Resolve to local file
+        pair = self.get_remote_state(self.file_id)
+        assert pair
+        self.engine_1.resolve_with_local(pair.id)
+        self.wait_sync(wait_for_async=True)
+        assert self.remote_2.get_content(self.file_id) == b"Local update 2"
+
+    def test_resolve_local_folder(self):
+        local = self.local_1
+        remote = self.remote_1
+
+        self.engine_1.suspend()
+        folder = remote.make_folder(self.workspace_id, "ABC").uid
+        self.engine_1.resume()
+        self.wait_sync(wait_for_async=True)
+
+        self.engine_1.suspend()
+        local.rename("/ABC", "ABC_123")
+        remote.rename(folder, "ABC_234")
+        self.engine_1.resume()
+        self.wait_sync(wait_for_async=True)
+
+        pair = self.get_remote_state(folder)
+        assert pair.pair_state == "conflicted"
+
+        self.engine_1.resolve_with_local(pair.id)
+        self.wait_sync(wait_for_async=True)
+        pair = self.get_remote_state(folder)
+        assert pair.pair_state == "synchronized"
+
+        children = local.get_children_info("/")
+        assert len(children) == 2
+        assert not children[1].folderish
+        assert children[0].folderish
+        assert children[0].name == "ABC_123"
+
+        children = remote.get_fs_children(self.workspace_id)
+        assert len(children) == 2
+        assert not children[0].folderish
+        assert children[1].folderish
+        assert children[1].name == "ABC_123"
+
+    def test_resolve_remote(self):
+        self.test_real_conflict()
+        # Resolve to remote file
+        pair = self.get_remote_state(self.file_id)
+        assert pair
+        self.engine_1.resolve_with_remote(pair.id)
+        self.wait_sync(wait_for_async=True)
+        assert self.local_1.get_content("/test.txt") == b"Remote update 2"
+
+    def test_conflict_on_lock(self):
+        doc_uid = self.file_id.split("#")[-1]
+        local = self.local_1
+        remote = self.remote_2
+        self.remote_document_client_2.lock(doc_uid)
+        local.update_content("/test.txt", b"Local update")
+        self.wait_sync(wait_for_async=True)
+        assert local.get_content("/test.txt") == b"Local update"
+        assert remote.get_content(self.file_id) == b"Some content"
+        remote.update_content(self.file_id, b"Remote update")
+        self.wait_sync(wait_for_async=True)
+        assert local.get_content("/test.txt") == b"Local update"
+        assert remote.get_content(self.file_id) == b"Remote update"
+        assert self.get_remote_state(self.file_id).pair_state == "conflicted"
+        self.remote_document_client_2.unlock(doc_uid)
+        self.wait_sync(wait_for_async=True)
+        assert local.get_content("/test.txt") == b"Local update"
+        assert remote.get_content(self.file_id) == b"Remote update"
+        assert self.get_remote_state(self.file_id).pair_state == "conflicted"
+
+    @pytest.mark.randombug(
"NXDRIVE-776: Random bug but we cannot use " + "pytest.mark.random because this test would " + "take ~30 minutes to complete.", + mode="BYPASS", + ) + def test_XLS_conflict_on_locked_document(self): + self._XLS_local_update_on_locked_document(locked_from_start=False) + + @pytest.mark.randombug( + "NXDRIVE-776: Random bug but we cannot use " + "pytest.mark.random because this test would " + "take ~30 minutes to complete.", + mode="BYPASS", + ) + def test_XLS_conflict_on_locked_document_from_start(self): + self._XLS_local_update_on_locked_document() + + def _XLS_local_update_on_locked_document(self, locked_from_start=True): + remote = self.remote_2 + local = self.local_1 + + # user2: create remote XLS file + fs_item_id = remote.make_file( + self.workspace_id, + "Excel 97 file.xls", + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00", + ).uid + doc_uid = fs_item_id.split("#")[-1] + self.wait_sync(wait_for_async=True) + assert local.exists("/Excel 97 file.xls") + + if locked_from_start: + # user2: lock document before user1 opening it + self.remote_document_client_2.lock(doc_uid) + self.wait_sync(wait_for_async=True) + local.unset_readonly("/Excel 97 file.xls") + + # user1: simulate opening XLS file with MS Office ~= update its content + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" + ) + self.wait_sync(wait_for_async=locked_from_start) + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + if locked_from_start: + # remote content hasn't changed, pair state is conflicted + # and remote_can_update flag is False + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" + ) + assert pair_state.pair_state == "unsynchronized" + assert not pair_state.remote_can_update + else: + # remote content has changed, pair state is synchronized + # and remote_can_update flag is True + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" + ) + assert pair_state.pair_state == "synchronized" + assert pair_state.remote_can_update + + if not locked_from_start: + # user2: lock document after user1 opening it + self.remote_document_client_2.lock(doc_uid) + self.wait_sync(wait_for_async=True) + + # user1: simulate updating XLS file with MS Office + # 1. Create empty file 787D3000 + # 2. Update 787D3000 + # 3. Update Excel 97 file.xls + # 4. Update 787D3000 + # 5. Move Excel 97 file.xls to 1743B25F.tmp + # 6. Move 787D3000 to Excel 97 file.xls + # 7. Update Excel 97 file.xls + # 8. Update 1743B25F.tmp + # 9. Update Excel 97 file.xls + # 10. 
Delete 1743B25F.tmp + local.make_file("/", "787D3000") + local.update_content("/787D3000", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00") + local.unset_readonly("/Excel 97 file.xls") + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02" + ) + local.update_content( + "/787D3000", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + shutil.move(local.abspath("/Excel 97 file.xls"), local.abspath("/1743B25F.tmp")) + shutil.move(local.abspath("/787D3000"), local.abspath("/Excel 97 file.xls")) + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03\x04" + ) + local.update_content( + "/1743B25F.tmp", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" + ) + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + local.delete_final("/1743B25F.tmp") + self.wait_sync(wait_for_async=not locked_from_start) + assert len(local.get_children_info("/")) == 2 + assert ( + local.get_content("/Excel 97 file.xls") + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + # remote content hasn't changed, pair state is conflicted + # and remote_can_update flag is False + if locked_from_start: + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" + ) + else: + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" + ) + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + assert pair_state.pair_state == "unsynchronized" + assert not pair_state.remote_can_update + + # user2: remote update, conflict is detected once again + # and remote_can_update flag is still False + remote.update_content( + fs_item_id, + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02", + "New Excel 97 file.xls", + ) + self.wait_sync(wait_for_async=True) + + assert len(local.get_children_info("/")) == 2 + assert local.exists("/Excel 97 file.xls") + assert ( + local.get_content("/Excel 97 file.xls") + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + + assert len(remote.get_fs_children(self.workspace_id)) == 2 + assert remote.get_fs_info(fs_item_id).name == "New Excel 97 file.xls" + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02" + ) + + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + assert pair_state.pair_state == "conflicted" + assert not pair_state.remote_can_update + + # user2: unlock document, conflict is detected once again + # and remote_can_update flag is now True + self.remote_document_client_2.unlock(doc_uid) + self.wait_sync(wait_for_async=True) + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + assert pair_state.pair_state == "conflicted" + assert pair_state.remote_can_update diff --git a/tests/functional/test_copy.py b/tests/functional/test_copy.py new file mode 100644 index 0000000000..83910442b6 --- /dev/null +++ b/tests/functional/test_copy.py @@ -0,0 +1,28 @@ +from .conftest import OneUserTest + + +class TestCopy(OneUserTest): + def test_synchronize_remote_copy(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Create a file and a folder in the remote root workspace + remote.make_file("/", "test.odt", content=b"Some content.") + remote.make_folder("/", "Test folder") + + # Launch ndrive and check synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + assert local.exists("/Test folder") + assert local.exists("/test.odt") + + # Copy the file to the 
folder remotely + remote.copy("/test.odt", "/Test folder") + + # Launch ndrive and check synchronization + self.wait_sync(wait_for_async=True) + assert local.exists("/test.odt") + assert local.get_content("/test.odt") == b"Some content." + assert local.exists("/Test folder/test.odt") + assert local.get_content("/Test folder/test.odt") == b"Some content." diff --git a/tests/functional/test_direct_transfer.py b/tests/functional/test_direct_transfer.py new file mode 100644 index 0000000000..d81a0c9ec3 --- /dev/null +++ b/tests/functional/test_direct_transfer.py @@ -0,0 +1,1211 @@ +""" +Test the Direct Transfer feature in different scenarii. +""" +import logging +import re +from pathlib import Path +from time import sleep +from typing import Optional +from unittest.mock import patch +from uuid import uuid4 + +import pytest +from nuxeo.exceptions import HTTPError + +from nxdrive.client.uploader.direct_transfer import DirectTransferUploader +from nxdrive.constants import TransferStatus +from nxdrive.exceptions import NotFound +from nxdrive.options import Options +from nxdrive.utils import get_tree_list + +from .. import ensure_no_exception +from .conftest import OneUserNoSync, OneUserTest + + +class DirectTransfer: + def setUp(self): + # No sync root, to ease testing + self.remote_1.unregister_as_root(self.workspace) + self.engine_1.start() + + # Lower chunk_* options to have chunked uploads without having to create big files + self.default_chunk_limit = Options.chunk_limit + self.default_chunk_size = Options.chunk_size + Options.chunk_limit = 1 + Options.chunk_size = 1 + + # The file used for the Direct Transfer + source = ( + self.location / "resources" / "databases" / "engine_migration_duplicate.db" + ) + assert source.stat().st_size > 1024 * 1024 * 1.5 + source_data = source.read_bytes() + + # Work with a copy of the file to allow parallel testing + self.file = self.tmpdir / f"{uuid4()}.bin" + self.file.write_bytes(source_data * 2) + self.file_size = self.file.stat().st_size + assert self.file_size > 1024 * 1024 * 3 # Must be > 3 MiB + + def tearDown(self): + # Restore options + Options.chunk_limit = self.default_chunk_limit + Options.chunk_size = self.default_chunk_size + + def has_blob(self) -> bool: + """Check that *self.file* exists on the server and has a blob attached.""" + try: + children = self.remote_document_client_1.documents.get_children( + path=self.ws.path + ) + assert len(children) == 1 + doc = children[0] + assert doc.properties["dc:title"] == self.file.name + except Exception: + return False + return bool(doc.properties["file:content"]) + + def no_uploads(self) -> bool: + """Check there is no ongoing uploads.""" + assert not self.engine_1.dao.get_dt_upload(path=self.file) + + def sync_and_check( + self, should_have_blob: bool = True, check_for_blob: bool = True + ) -> None: + # Sync + self.wait_sync() + + # Check the error count + assert not self.engine_1.dao.get_errors(limit=0) + + # Check the uploads count + assert not list(self.engine_1.dao.get_dt_uploads()) + + # Check the file exists on the server and has a blob attached + + if not check_for_blob: + # Useful when checking for duplicates creation + return + + if should_have_blob: + assert self.has_blob() + else: + assert not self.has_blob() + + def direct_transfer( + self, + duplicate_behavior: str = "create", + last_local_selected_location: Optional[Path] = None, + new_folder: Optional[str] = None, + ) -> None: + self.engine_1.direct_transfer( + {self.file: self.file_size}, + self.ws.path, + self.ws.uid, + 
self.ws.title, + duplicate_behavior=duplicate_behavior, + last_local_selected_location=last_local_selected_location, + new_folder=new_folder, + ) + + def test_upload(self): + """A regular Direct Transfer.""" + + # There is no upload, right now + self.no_uploads() + + with ensure_no_exception(): + self.direct_transfer() + self.sync_and_check() + + def test_upload_new_folder(self): + """A regular Direct Transfer inside a new remote folder.""" + + # There is no upload, right now + self.no_uploads() + new_folder_name = str(uuid4())[:6] + with ensure_no_exception(): + self.direct_transfer(new_folder=new_folder_name) + self.sync_and_check(check_for_blob=False) + + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == new_folder_name + subfolder = self.remote_document_client_1.get_children_info(children[0].uid) + assert len(subfolder) == 1 + assert subfolder[0].name == self.file.name + + def test_upload_new_folder_empty(self): + """An empty Direct Transfer that should just create a new remote folder.""" + + # There is no upload, right now + self.no_uploads() + new_folder_name = str(uuid4())[:6] + with ensure_no_exception(): + self.engine_1.direct_transfer( + {}, + self.ws.path, + self.ws.uid, + self.ws.title, + duplicate_behavior="create", + last_local_selected_location=None, + new_folder=new_folder_name, + ) + self.sync_and_check(check_for_blob=False) + + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == new_folder_name + assert not self.remote_document_client_1.get_children_info(children[0].uid) + + """ + def test_cancel_upload(self): + "" + Pause the transfer by simulating a click on the pause/resume icon + on the current upload in the DT window; and cancel the upload. + Verify that the linked session has been updated after the + upload cancel. 
+ "" + expression = re.compile(#check old_functional) + + def callback(*_): + ""This will mimic what is done in TransferItem.qml."" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Verify the session status + doc_pair = dao.get_state_from_id(1) + assert doc_pair + session = dao.get_session(1) + assert session + assert session.total_items == 1 + assert session.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + engine = self.engine_1 + dao = self.engine_1.dao + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(last_local_selected_location=self.file.parent) + self.wait_sync() + + assert dao.get_dt_uploads_with_status(TransferStatus.PAUSED) + + last_location = dao.get_config("dt_last_local_selected_location") + assert last_location + assert Path(last_location) == self.file.parent + + # Cancel the upload + upload = list(dao.get_dt_uploads())[0] + engine.cancel_upload(upload.uid) + + with self._caplog.at_level(logging.INFO): + self.sync_and_check(should_have_blob=False) + + assert not dao.get_state_from_local(upload.path) + + # Verify the session status after cancellation + doc_pair = dao.get_state_from_id(1) + assert doc_pair + session = dao.get_session(1) + assert session.total_items == 0 + assert session.status == TransferStatus.CANCELLED + + # A new Notification log should appear + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert not matches + """ + + def test_with_engine_not_started(self): + """A Direct Transfer should work even if engines are stopped.""" + self.app.quit() + pytest.xfail("Waiting for NXDRIVE-1910") + + self.engine_1.stop() + + # There is no upload, right now + self.no_uploads() + + with ensure_no_exception(): + self.direct_transfer() + self.sync_and_check() + + @Options.mock() + def test_duplicate_file_create(self): + """ + The file already exists on the server. + The user wants to continue the transfer and create a duplicate. + """ + + with ensure_no_exception(): + # 1st upload: OK + self.direct_transfer() + self.sync_and_check() + + # 2nd upload: a new document will be created + self.direct_transfer(duplicate_behavior="create") + self.sync_and_check(check_for_blob=False) + + # Ensure there are 2 documents on the server + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 2 + assert children[0].name == self.file.name + assert children[1].name == self.file.name + + def test_duplicate_file_ignore(self): + """ + The file already exists on the server. + The user wants to cancel the transfer to prevent duplicates. + """ + + class NoChunkUpload(DirectTransferUploader): + def upload_chunks(self, *_, **__): + """Patch Remote.upload() to be able to check that nothing will be uploaded.""" + assert 0, "No twice upload should be done!" 
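+
+        # NB: the `upload` wrapper below keeps the engine.remote.upload() call
+        # signature intact and only swaps the `uploader` keyword for the
+        # blob-less subclass above, so the rest of the upload code path is
+        # exercised unchanged.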
+ + def upload(*args, **kwargs): + """Set our specific uploader to check for twice upload.""" + kwargs.pop("uploader") + return upload_orig(*args, uploader=NoChunkUpload, **kwargs) + + engine = self.engine_1 + upload_orig = engine.remote.upload + + # There is no upload, right now + self.no_uploads() + + with ensure_no_exception(): + # 1st upload: OK + self.direct_transfer() + self.sync_and_check() + + # 2nd upload: it should be cancelled + with patch.object(engine.remote, "upload", new=upload): + self.direct_transfer(duplicate_behavior="ignore") + self.sync_and_check() + + # Ensure there is only 1 document on the server + self.sync_and_check() + + @Options.mock() + def test_duplicate_file_override(self): + """ + The file already exists on the server. + The user wants to continue the transfer and replace the document. + """ + + with ensure_no_exception(): + # 1st upload: OK + self.direct_transfer() + self.sync_and_check() + + # To ease testing, we change local file content + self.file.write_bytes(b"blob changed!") + + # 2nd upload: the blob should be replaced on the server + self.direct_transfer(duplicate_behavior="override") + self.sync_and_check() + + # Ensure there is only 1 document on the server + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == self.file.name + + # Ensure the blob content was updated + assert ( + self.remote_1.get_blob(children[0].uid, xpath="file:content") + == b"blob changed!" + ) + + def test_pause_upload_manually(self): + """ + Pause the transfer by simulating a click on the pause/resume icon + on the current upload in the systray menu. + """ + + def callback(*_): + """ + This will mimic what is done in SystrayTranfer.qml: + - call API.pause_transfer() that will call: + - engine.dao.pause_transfer(nature, transfer_uid) + Then the upload will be paused in Remote.upload(). + """ + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + engine = self.engine_1 + dao = self.engine_1.dao + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + assert dao.get_dt_uploads_with_status(TransferStatus.PAUSED) + + # Resume the upload + engine.resume_transfer( + "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True + ) + self.sync_and_check() + + def test_pause_upload_automatically(self): + """ + Pause the transfer by simulating an application exit + or clicking on the Suspend menu entry from the systray. + """ + + def callback(*_): + """This will mimic what is done in SystrayMenu.qml: suspend the app.""" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Suspend! 
+            self.manager_1.suspend()
+
+        engine = self.engine_1
+        dao = engine.dao
+
+        # There is no upload, right now
+        self.no_uploads()
+
+        with patch.object(engine.remote, "upload_callback", new=callback):
+            with ensure_no_exception():
+                self.direct_transfer()
+                self.wait_sync()
+            assert dao.get_dt_uploads_with_status(TransferStatus.SUSPENDED)
+
+        # Resume the upload
+        self.manager_1.resume()
+        self.sync_and_check()
+
+    def test_modifying_paused_upload(self):
+        """Modifying a paused upload should discard the current upload."""
+
+        def callback(*_):
+            """Pause the upload and apply changes to the document."""
+            # Ensure we have 1 ongoing upload
+            uploads = list(dao.get_dt_uploads())
+            assert uploads
+            upload = uploads[0]
+            assert upload.status == TransferStatus.ONGOING
+
+            # Pause the upload
+            dao.pause_transfer("upload", upload.uid, 50.0)
+
+            # Apply changes to the file
+            self.file.write_bytes(b"locally changed")
+
+        engine = self.engine_1
+        dao = engine.dao
+
+        # There is no upload, right now
+        self.no_uploads()
+
+        with patch.object(engine.remote, "upload_callback", new=callback):
+            with ensure_no_exception():
+                self.direct_transfer()
+                self.wait_sync()
+
+        # Resume the upload
+        engine.resume_transfer(
+            "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True
+        )
+        self.sync_and_check()
+        # Check the local content is correct
+        assert self.file.read_bytes() == b"locally changed"
+
+    """
+    @not_windows(
+        reason="Cannot test the behavior as the local deletion is blocked by the OS."
+    )
+    def test_deleting_paused_upload(self):
+        ""Deleting a paused upload should discard the current upload.""
+
+        def callback(*_):
+            ""Pause the upload and delete the document.""
+            # Ensure we have 1 ongoing upload
+            uploads = list(dao.get_dt_uploads())
+            assert uploads
+            upload = uploads[0]
+            assert upload.status == TransferStatus.ONGOING
+
+            # Pause the upload
+            dao.pause_transfer("upload", upload.uid, 50.0)
+
+            # Remove the document
+            # (this is the problematic part on Windows, because for the
+            # file descriptor to be released we need to escape from
+            # Remote.upload(), which is not possible from here)
+            self.file.unlink()
+            assert not self.file.exists()
+
+        engine = self.engine_1
+        dao = engine.dao
+
+        # There is no upload, right now
+        self.no_uploads()
+
+        with patch.object(engine.remote, "upload_callback", new=callback):
+            with ensure_no_exception():
+                self.direct_transfer()
+                self.wait_sync()
+
+        # Resume the upload
+        engine.resume_transfer(
+            "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True
+        )
+        self.sync_and_check(should_have_blob=False)
+    """
+
+    def test_server_error_but_upload_ok(self):
+        """
+        Test an error happening after chunks were uploaded, at the FileManager.Import operation call.
+        This could happen if a proxy does not handle the final requests well, as seen in NXDRIVE-1753.
+        """
+        self.app.quit()
+        pytest.skip("Not yet implemented.")
+
+        class BadUploader(DirectTransferUploader):
+            """Used to simulate bad server responses."""
+
+            def link_blob_to_doc(self, *args, **kwargs):
+                """Simulate a server error."""
+                # Call the original method to effectively end the upload process
+                super().link_blob_to_doc(*args, **kwargs)
+
+                # The file should be present on the server
+                # assert self.remote.exists(file_path)
+
+                # There should be 1 upload with DONE transfer status
+                uploads = list(dao.get_dt_uploads())
+                assert len(uploads) == 1
+                upload = uploads[0]
+                assert upload.status == TransferStatus.DONE
+
+                # And throw an error
+                stack = "The proxy server received an invalid response from an upstream server."
+                raise HTTPError(
+                    status=502, message="Mocked Proxy Error", stacktrace=stack
+                )
+
+        def upload(*args, **kwargs):
+            """Set our specific uploader to simulate a server error."""
+            kwargs.pop("uploader")
+            return upload_orig(*args, uploader=BadUploader, **kwargs)
+
+        # file_path = f"{self.ws.path}/{self.file.name}"
+        engine = self.engine_1
+        dao = engine.dao
+        upload_orig = engine.remote.upload
+
+        # There is no upload, right now
+        self.no_uploads()
+
+        with patch.object(engine.remote, "upload", new=upload):
+            with ensure_no_exception():
+                self.direct_transfer()
+                self.wait_sync()
+
+        # There should be no upload as the Processor has checked the file existence
+        # on the server and so deleted the upload from the database
+        self.no_uploads()
+
+        self.sync_and_check()
+
+    def test_upload_ok_but_network_lost_in_the_meantime(self):
+        """
+        NXDRIVE-2233 scenario:
+
+        - Start a Direct Transfer.
+        - When all chunks are uploaded, and just after having called the FileManager
+          operation: the network connection is lost.
+        - The request has already started, and it has a 6-hour timeout.
+        - But the document was created on the server because the call was made.
+        - Finally, after 6 hours, with the network restored in the meantime, the
+          FileManager call throws a 404 error because the batchId was already consumed.
+        - The transfer will be displayed in the Direct Transfer window, but nothing more
+          will be done.
+
+        Such a transfer must be removed from the database.
+ """ + + class BadUploader(DirectTransferUploader): + """Used to simulate bad server responses.""" + + def link_blob_to_doc(self, *args, **kwargs): + """End the upload and simulate a network loss.""" + # Call the original method to effectively end the upload process + super().link_blob_to_doc(*args, **kwargs) + + # And throw an error + raise NotFound("Mock'ed error") + + def upload(*args, **kwargs): + """Set our specific uploader.""" + kwargs.pop("uploader") + return upload_orig(*args, uploader=BadUploader, **kwargs) + + # file_path = f"{self.ws.path}/{self.file.name}" + engine = self.engine_1 + dao = engine.dao + upload_orig = engine.remote.upload + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload", new=upload): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + + # The document has been created + self.sync_and_check() + + # There should be no upload as the Processor has made the clean-up + self.no_uploads() + + # There is no state to handle in the database + assert not dao.get_local_children(Path("/")) + + """ + def test_server_error_upload(self): + ""Test a server error happening after chunks were uploaded, at the Blob.AttachOnDocument operation call."" + + class BadUploader(DirectTransferUploader): + ""Used to simulate bad server responses."" + + def link_blob_to_doc(self, *args, **kwargs): + ""Simulate a server error."" + raise ConnectionError("Mocked exception") + + def upload(*args, **kwargs): + ""Set our specific uploader to simulate server error."" + kwargs.pop("uploader") + return upload_orig(*args, uploader=BadUploader, **kwargs) + + engine = self.engine_1 + dao = engine.dao + upload_orig = engine.remote.upload + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload", new=upload): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + + # There should be 1 upload with ONGOING transfer status + uploads = list(dao.get_dt_uploads()) + assert len(uploads) == 1 + upload = uploads[0] + assert upload.status == TransferStatus.DONE + + # The file does not exist on the server + assert not self.has_blob() + + self.sync_and_check() + """ + + """ + def test_chunk_upload_error(self): + ""Test a server error happening while uploading chunks."" + + def callback(uploader): + ""Mimic a connection issue after chunk 1 is sent."" + if len(uploader.blob.uploadedChunkIds) > 1: + raise ConnectionError("Mocked error") + + engine = self.engine_1 + dao = engine.dao + bad_remote = self.get_bad_remote() + bad_remote.upload_callback = callback + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine, "remote", new=bad_remote), ensure_no_exception(): + self.direct_transfer() + self.wait_sync(timeout=3) + + # There should be 1 upload with ONGOING transfer status + uploads = list(dao.get_dt_uploads()) + assert len(uploads) == 1 + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # The file does not exist on the server + assert not self.has_blob() + + self.sync_and_check() + """ + + +class TestDirectTransfer(OneUserTest, DirectTransfer): + """Direct Transfer in "normal" mode, i.e.: when synchronization features are enabled.""" + + def setUp(self): + DirectTransfer.setUp(self) + + def wait_sync(self, *args, **kwargs): + sleep(3) + super().wait_sync(*args, **kwargs) + + +class TestDirectTransferNoSync(OneUserNoSync, DirectTransfer): + """Direct Transfer should work when synchronization features are not enabled.""" + + def 
setUp(self):
+        DirectTransfer.setUp(self)
+
+    def wait_sync(self, *args, **kwargs):
+        sleep(3)
+        super().wait_sync(*args, **kwargs)
+
+
+class DirectTransferFolder:
+    def setUp(self):
+        if not self.engine_1.have_folder_upload:
+            self.app.quit()
+            pytest.skip("FileManager.CreateFolder API not available.")
+
+        # No sync root, to ease testing
+        self.remote_1.unregister_as_root(self.workspace)
+        self.engine_1.start()
+
+    def get_children(self, path, children_list, key):
+        children = self.remote_1.get_children(path)["entries"]
+        for child in children:
+            if child["type"] == "Folder":
+                children_list = self.get_children(child["path"], children_list, key)
+            children_list.append(child[key])
+        return children_list
+
+    def checks(self, created):
+        """Check that the content on the remote equals the created items."""
+        # Ensure there is only 1 folder created at the workspace root
+        ws_children = self.remote_1.get_children(self.ws.path)["entries"]
+        assert len(ws_children) == 1
+        root = ws_children[0]
+
+        # Everything has been uploaded
+        children = self.get_children(root["path"], [root["path"]], "path")
+
+        assert len(children) == len(created)
+
+        # Clean up the paths so the assert uses only the relative part
+        children = sorted(child.replace(self.ws.path, "") for child in children)
+        created = sorted(elem.replace(self.tmpdir.as_posix(), "") for elem in created)
+        assert created == children
+
+        # There is nothing more to upload
+        assert not list(self.engine_1.dao.get_dt_uploads())
+
+        # And there is no error
+        assert not self.engine_1.dao.get_errors(limit=0)
+
+    def direct_transfer(self, folder, duplicate_behavior: str = "create") -> None:
+        paths = {path: size for path, size in get_tree_list(folder)}  # noqa
+        self.engine_1.direct_transfer(
+            paths,
+            self.ws.path,
+            self.ws.uid,
+            self.ws.title,
+            duplicate_behavior=duplicate_behavior,
+        )
+
+    def test_simple_folder(self):
+        """Test the Direct Transfer on a simple empty folder."""
+
+        # There is no upload, right now
+        assert not list(self.engine_1.dao.get_dt_uploads())
+
+        root_folder = self.tmpdir / str(uuid4())
+        root_folder.mkdir()
+
+        with ensure_no_exception():
+            self.direct_transfer(root_folder)
+            self.wait_sync(wait_for_async=True)
+
+        # Ensure there is only 1 folder created at the workspace root
+        children = self.remote_1.get_children(self.ws.path)["entries"]
+        assert len(children) == 1
+        assert children[0]["title"] == root_folder.name
+
+        # Everything has been uploaded
+        assert not list(self.engine_1.dao.get_dt_uploads())
+
+    def test_sub_folders(self):
+        """Test the Direct Transfer on a folder containing subfolders and files."""
+
+        # There is no upload, right now
+        assert not list(self.engine_1.dao.get_dt_uploads())
+
+        created = []
+
+        root_folder = self.tmpdir / str(uuid4())[:6]
+        root_folder.mkdir()
+
+        created.append(root_folder.as_posix())
+        for _ in range(3):
+            sub_folder = root_folder / f"folder_{str(uuid4())[:4]}"
+            sub_folder.mkdir()
+            created.append(sub_folder.as_posix())
+            for _ in range(2):
+                sub_file = sub_folder / f"file_{str(uuid4())[:4]}"
+                sub_file.write_text("test", encoding="utf-8")
+                created.append(sub_file.as_posix())
+
+        with ensure_no_exception():
+            self.direct_transfer(root_folder)
+            self.wait_sync(wait_for_async=True)
+
+        self.checks(created)
+
+    def test_same_name_folders(self):
+        """Test the Direct Transfer on folders with the same names."""
+
+        # There is no upload, right now
+        assert not list(self.engine_1.dao.get_dt_uploads())
+
+        created = []
+
+        root_folder = self.tmpdir / 
str(uuid4())[:6] + root_folder.mkdir() + + created.append(root_folder.as_posix()) + + folder_a = root_folder / "folder_a" + folder_a.mkdir() + created.append(folder_a.as_posix()) + sub_file = folder_a / "file_1.txt" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + folder_b = root_folder / "folder_b" + folder_b.mkdir() + created.append(folder_b.as_posix()) + sub_file = folder_b / "file_1.txt" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + # Sub-folder + folder_a = folder_b / "folder_a" + folder_a.mkdir() + created.append(folder_a.as_posix()) + sub_file = folder_a / "file_1.txt" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync(wait_for_async=True) + + self.checks(created) + + def test_sessions(self): + """ + Test the Direct Transfer session system. + Start multiple transfers to check session creation. + Check the sessions' status after synchronization. + """ + + # There is no upload, right now + assert not list(self.engine_1.dao.get_dt_uploads()) + expression = re.compile( + r"" + ) + + for x in range(4): + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file) + + with ensure_no_exception(): + self.direct_transfer(root_folder) + planned = [ + self.engine_1.dao.get_state_from_local(item) for item in created + ] + assert len(planned) == len(created) + assert all(dt["session"] == x + 1 for dt in planned) + + session = self.engine_1.dao.get_session(x + 1) + assert session + assert session.status == TransferStatus.ONGOING + + with self._caplog.at_level(logging.INFO): + self.wait_sync(wait_for_async=True) + + session = self.engine_1.dao.get_session(x + 1) + assert session + assert session.status == TransferStatus.DONE + + # A new notification log should appear at each iteration + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert len(matches) == x + 1 + + def test_pause_resume_session(self): + """ + Test the session pause and resume system. + The session's final status should be DONE.
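+ Sketch of the flow exercised below (these are the actual helpers used): + dao.pause_session(uid) -> the session and its uploads become PAUSED + engine.resume_session(uid) -> the uploads are re-queued and complete + The pause itself is triggered from the remote's upload_callback, mimicking SessionItem.qml.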
+ """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + expression = re.compile( + r"" + ) + + def callback(*_): + """This will mimic what is done in SessionItem.qml.""" + # Ensure we have 1 ongoing upload + dao = engine.dao + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Verify the session status + sessions = dao.get_active_sessions_raw() + assert len(sessions) == 1 + session = sessions[0] + assert session["total"] == 2 + assert session["status"] == TransferStatus.ONGOING + + # Pause the session + dao.pause_session(session["uid"]) + + session = dao.get_session(session["uid"]) + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.PAUSED + + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + session = engine.dao.get_session(1) + assert session + assert session.status == TransferStatus.PAUSED + + engine.resume_session(1) + with self._caplog.at_level(logging.INFO): + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 1 + session = sessions[0] + assert session["status"] == TransferStatus.DONE + + # A new Notification logs should appear at each iteration + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert len(matches) == 1 + + def test_pause_cancel_session(self): + """ + Test the session pause and cancel system. + All Uploads should be removed and the Session final status should be CANCELLED. + """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + + def callback(*_): + """This will mimic what is done in SessionItem.qml.""" + # Ensure we have 1 ongoing upload + dao = engine.dao + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Verify the session status + sessions = dao.get_active_sessions_raw() + assert len(sessions) == 1 + session = sessions[0] + assert session["total"] == 2 + assert session["status"] == TransferStatus.ONGOING + + # Pause the session + dao.pause_session(session["uid"]) + + session = dao.get_session(session["uid"]) + print(session) + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.PAUSED + + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("Some content." 
* 1024 * 1024 * 2, encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + session = engine.dao.get_session(1) + assert session + assert session.status == TransferStatus.PAUSED + + engine.cancel_session(1) + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 1 + session = sessions[0] + assert session["status"] == TransferStatus.CANCELLED + + uploads = list(engine.dao.get_dt_uploads()) + assert not uploads + + @pytest.mark.xfail(reason="NXDRIVE-2495") + def test_pause_resume_session_non_chunked(self): + """ + Test the session pause and resume system for sessions containing non-chunked files. + The session's final status should be DONE. + """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + expression = re.compile( + r"" + ) + + upload_count = 0 + + def get_upload(*_, **__): + """Alternative version of EngineDAO.get_upload() that pauses the session.""" + nonlocal upload_count + + # The first upload is the folder; pause the session just before the file. + if upload_count == 0: + upload_count += 1 + return None + + # Ensure there is no ongoing upload + dao = engine.dao + uploads = list(dao.get_dt_uploads()) + assert not uploads + + # Verify the session status + sessions = dao.get_active_sessions_raw() + assert len(sessions) == 1 + session = sessions[0] + assert session["total"] == 2 + assert session["status"] is TransferStatus.ONGOING + + # Pause the session + dao.pause_session(session["uid"]) + + # Session should be paused now + session = dao.get_session(session["uid"]) + assert session.status is TransferStatus.PAUSED + + return None + + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("Some content.", encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.dao, "get_upload", new=get_upload): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + session = engine.dao.get_session(1) + assert session + assert session.status is TransferStatus.PAUSED + + uploads = list(engine.dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status is TransferStatus.PAUSED + + engine.resume_session(1) + with self._caplog.at_level(logging.INFO): + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 1 + session = sessions[0] + assert session["status"] is TransferStatus.DONE + + # A new notification log should appear + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert len(matches) == 1 + + def test_sub_files(self): + """Test the Direct Transfer on a folder with many files.""" + + # There is no upload, right now + assert not list(self.engine_1.dao.get_dt_uploads()) + + created = [] + + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + + created.append(root_folder.as_posix()) + for _ in range(5): + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + with ensure_no_exception(): + self.direct_transfer(root_folder) +
self.wait_sync(wait_for_async=True) + + self.checks(created) + + def test_identical_sessions(self): + """ + Create two sessions with the same file, then pause them. + Ensure that two uploads are created. + Both sessions' final status should be DONE. + """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + + def callback(*_): + """This will mimic what is done in SessionItem.qml.""" + dao = engine.dao + + sessions = dao.get_active_sessions_raw() + for session in sessions: + # Pause the session + dao.pause_session(session["uid"]) + sessions = dao.get_active_sessions_raw() + uploads = list(dao.get_dt_uploads()) + assert uploads + for upload in uploads: + assert upload.status is TransferStatus.PAUSED + + for _ in range(2): + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / "file_test_duplicate.txt" + sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + sessions = engine.dao.get_active_sessions_raw() + assert len(sessions) == 2 + for session in sessions: + assert session["status"] is TransferStatus.PAUSED + + uploads = list(engine.dao.get_dt_uploads()) + assert len(uploads) == 2 + + for session in sessions: + engine.resume_session(session["uid"]) + + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 2 + for session in sessions: + assert session["status"] is TransferStatus.DONE + assert not list(engine.dao.get_dt_uploads()) + + +class TestDirectTransferFolder(OneUserTest, DirectTransferFolder): + """Direct Transfer in "normal" mode, i.e. when synchronization features are enabled.""" + + def setUp(self): + DirectTransferFolder.setUp(self) + + def wait_sync(self, *args, **kwargs): + sleep(3) + super().wait_sync(*args, **kwargs) + + +class TestDirectTransferFolderNoSync(OneUserNoSync, DirectTransferFolder): + """Direct Transfer should work when synchronization features are not enabled.""" + + def setUp(self): + DirectTransferFolder.setUp(self) + + def wait_sync(self, *args, **kwargs): + sleep(3) + super().wait_sync(*args, **kwargs) diff --git a/tests/functional/test_encoding.py b/tests/functional/test_encoding.py new file mode 100644 index 0000000000..f253650ea7 --- /dev/null +++ b/tests/functional/test_encoding.py @@ -0,0 +1,121 @@ +import os +from pathlib import Path + +from nxdrive.client.local import FileInfo + +from ..markers import not_mac +from .conftest import OneUserTest + + +class TestEncoding(OneUserTest): + """ + def test_filename_with_accents_from_server(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = b"Contenu sans accents."
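+ # "\xe9" and "\xe8" below are "é" and "è": these filenames exercise + # accented characters end-to-end (server to local).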
+ remote.make_file(self.workspace, "Nom sans accents.doc", content=data) + remote.make_file(self.workspace, "Nom avec accents \xe9 \xe8.doc", content=data) + self.wait_sync(wait_for_async=True) + + assert local.get_content("/Nom sans accents.doc") == data + assert local.get_content("/Nom avec accents \xe9 \xe8.doc") == data + """ + + def test_filename_with_katakana(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = b"Content" + remote.make_file(self.workspace, "Remote \u30bc\u30ec.doc", content=data) + local.make_file("/", "Local \u30d7 \u793e.doc", content=data) + self.wait_sync(wait_for_async=True) + + assert remote.get_content("/Local \u30d7 \u793e.doc") == data + assert local.get_content("/Remote \u30bc\u30ec.doc") == data + + """ + def test_content_with_accents_from_server(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = "Contenu avec caract\xe8res accentu\xe9s.".encode("utf-8") + remote.make_file(self.workspace, "Nom sans accents.txt", content=data) + self.wait_sync(wait_for_async=True) + + assert local.get_content("/Nom sans accents.txt") == data + """ + + """ + def test_filename_with_accents_from_client(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = b"Contenu sans accents." + local.make_file("/", "Avec accents \xe9 \xe8.doc", content=data) + local.make_file("/", "Sans accents.doc", content=data) + self.wait_sync(wait_for_async=True) + + assert remote.get_content("/Avec accents \xe9 \xe8.doc") == data + assert remote.get_content("/Sans accents.doc") == data + """ + + """ + def test_content_with_accents_from_client(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = "Contenu avec caract\xe8res accentu\xe9s.".encode("utf-8") + local.make_file("/", "Nom sans accents", content=data) + self.wait_sync(wait_for_async=True) + + assert remote.get_content("/Nom sans accents") == data + """ + + def test_name_normalization(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + filename = "space\xa0 et TM\u2122.doc" + local.make_file("/", filename) + self.wait_sync(wait_for_async=True) + + assert remote.get_info("/" + filename).name == filename + + @not_mac(reason="Normalization does not work on macOS") + def test_fileinfo_normalization(self): + local = self.local_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + + name = "Teste\u0301" + local.make_file("/", name, content=b"Test") + + # FileInfo() will normalize the filename + assert FileInfo(local.base_folder, Path(name), False, 0).name != name + + # The encoding should be different, + # cannot trust the get_children as they use FileInfo + children = os.listdir(local.abspath("/")) + assert len(children) == 1 + assert children[0] != name diff --git a/tests/functional/test_group_changes.py b/tests/functional/test_group_changes.py new file mode 100644 index 0000000000..b2c9b87566 --- /dev/null +++ b/tests/functional/test_group_changes.py @@ -0,0 +1,231 @@ +from logging import getLogger + +from nuxeo.exceptions import HTTPError +from nuxeo.models import Document, Group + +from .. 
import env +from .conftest import OneUserTest, root_remote, salt + +log = getLogger(__name__) + + +class TestGroupChanges(OneUserTest): + """ + Test that changes on groups are detected by Drive. + See https://jira.nuxeo.com/browse/NXP-14830. + """ + + def setUp(self): + self.group1 = salt("group1") + self.group2 = salt("group2") + self.parent_group = salt("parentGroup") + self.grand_parent_group = salt("grandParentGroup") + self.new_groups = ( + Group(groupname=self.group1, memberUsers=[self.user_1]), + Group(groupname=self.group2, memberUsers=[self.user_1]), + Group(groupname=self.parent_group, memberGroups=[self.group1]), + Group(groupname=self.grand_parent_group, memberGroups=[self.parent_group]), + ) + for group in self.new_groups: + self.root_remote.groups.create(group) + + # Create test workspace + workspace_name = salt("groupChangesTestWorkspace") + self.workspace_group = self.root_remote.documents.create( + Document( + name=workspace_name, + type="Workspace", + properties={"dc:title": workspace_name}, + ), + parent_path=env.WS_DIR, + ) + self.workspace_path = self.workspace_group.path + + self.admin_remote = root_remote(base_folder=self.workspace_path) + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + def tearDown(self): + self.workspace_group.delete() + for group in reversed(self.new_groups): + try: + self.root_remote.groups.delete(group.groupname) + except HTTPError as exc: + if exc.status == 404: + continue + raise + + def set_ace(self, user, doc): + log.info(f"Grant ReadWrite permission to {user} on {doc}") + self.admin_remote.execute( + command="Document.SetACE", + input_obj=f"doc:{doc}", + user=user, + permission="ReadWrite", + ) + + def test_group_changes_on_sync_root(self): + """ + Test changes on a group that has access to a synchronization root. + """ + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/", "syncRoot") + + self.set_ace(self.group1, sync_root_id) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot is created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + + self._test_group_changes("/syncRoot", self.group1) + + def test_group_changes_on_sync_root_child(self): + """ + Test changes on a group that has access + to a child of a synchronization root. + """ + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/", "syncRoot") + + log.info("Create child folder") + child_id = self.admin_remote.make_folder("/syncRoot", "child") + + self.set_ace(self.group1, sync_root_id) + self.set_ace(self.group2, child_id) + + log.info("Block inheritance on child") + self.admin_remote.block_inheritance(child_id, overwrite=False) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot and child are created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + assert self.local_root_client_1.exists("/syncRoot/child") + + self._test_group_changes("/syncRoot/child", self.group2) + + """ + def test_group_changes_on_sync_root_parent(self): + "" + Test changes on a group that has access + to the parent of a synchronization root. 
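+ (The sync root should inherit the permissions granted on its parent folder.)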
+ "" + log.info("Create parent folder") + parent_id = self.admin_remote.make_folder("/", "parent") + + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/parent", "syncRoot") + + self.set_ace(self.group1, parent_id) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot is created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + + self._test_group_changes("/syncRoot", self.group1) + """ + + def test_changes_with_parent_group(self): + """ + Test changes on the parent group of a group + that has access to a synchronization root. + """ + self._test_group_changes_with_ancestor_groups(self.parent_group) + + def test_changes_with_grand_parent_group(self): + """ + Test changes on the grandparent group of a group + that has access to a synchronization root. + """ + self._test_group_changes_with_ancestor_groups(self.grand_parent_group) + + def _test_group_changes(self, folder_path, group_name, need_parent=False): + """ + Tests changes on the given group that has access to the given folder: + - Remove the test user from the group. + - Add the test user to the group. + - Delete the group. + - Create the group including the test user. + """ + log.info( + "Test changes on %s for %s with need_parent=%r", + group_name, + folder_path, + need_parent, + ) + remote = self.admin_remote + local = self.local_root_client_1 + + log.info("Remove driveuser_1 from %s", group_name) + group = remote.groups.get(group_name) + group.memberUsers = [] + group.save() + + log.info("Check that %s is deleted locally", folder_path) + self.wait_sync(wait_for_async=True) + assert not local.exists(folder_path) + + log.info("Add driveuser_1 to %s", group_name) + group.memberUsers = [self.user_1] + group.save() + + log.info("Check that %s is created locally", folder_path) + self.wait_sync(wait_for_async=True) + assert local.exists(folder_path) + + log.info("Delete %s", group_name) + remote.groups.delete(group_name) + + log.info("Check that %s is deleted locally", folder_path) + self.wait_sync(wait_for_async=True) + assert not local.exists(folder_path) + + log.info("Create %s", group_name) + remote.groups.create(Group(groupname=group_name, memberUsers=[self.user_1])) + + if need_parent: + log.info( + "%s should not be created locally since " + "the newly created group has not been added yet " + "as a subgroup of parentGroup", + folder_path, + ) + self.wait_sync(wait_for_async=True) + assert not local.exists(folder_path) + + log.debug("Add %s as a subgroup of parentGroup", group_name) + group = remote.groups.get(self.parent_group) + group.memberGroups = [group_name] + group.save() + + log.info("Check that %s is created locally", folder_path) + self.wait_sync(wait_for_async=True) + assert local.exists(folder_path) + + def _test_group_changes_with_ancestor_groups(self, ancestor_group): + """ + Test changes on a descendant group of the given group + that has access to a synchronization root. 
+ """ + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/", "syncRoot") + + self.set_ace(ancestor_group, sync_root_id) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot is created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + + self._test_group_changes("/syncRoot", self.group1, need_parent=True) diff --git a/tests/functional/test_ignored.py b/tests/functional/test_ignored.py new file mode 100644 index 0000000000..76f890180e --- /dev/null +++ b/tests/functional/test_ignored.py @@ -0,0 +1,46 @@ +from pathlib import Path + +from .conftest import OneUserTest + + +class TestIgnored(OneUserTest): + def test_ignore_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + dao = self.engine_1.dao + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + remote.make_file("/", "abcde.txt", content=b"Some content.") + remote.make_file("/", "abcde.txt", content=b"Some other content.") + + self.wait_sync(wait_for_async=True) + assert local.exists("/abcde.txt") + # Check we only have one file locally + assert len(dao.get_local_children(Path("/"))) == 1 + # Check that there is an error + errors = dao.get_errors() + assert len(errors) == 1 + error_id = errors[0].id + + # Ignore the error + self.engine_1.ignore_pair(error_id, errors[0].last_error) + + self.wait_sync(wait_for_async=True) + + # Check there are no errors + assert not dao.get_errors() + # Check there is an ignored file + unsynceds = dao.get_unsynchronizeds() + assert len(unsynceds) == 1 + # Check that the ignored file is the same as the error that appeared previously + assert unsynceds[0].id == error_id + + # Force the engine to do a full scan again + self.engine_1._remote_watcher._last_remote_full_scan = None + self.wait_sync(wait_for_async=True) + + # Check that there are no errors back + assert not dao.get_errors() + assert dao.get_unsynchronized_count() == 1 diff --git a/tests/functional/test_local_changes_when_offline.py b/tests/functional/test_local_changes_when_offline.py new file mode 100644 index 0000000000..da8c71abae --- /dev/null +++ b/tests/functional/test_local_changes_when_offline.py @@ -0,0 +1,81 @@ +"""" +Test if changes made to local file system when Drive is offline sync's back +later when Drive becomes online. +""" +import pytest + +from nxdrive.constants import WINDOWS + +from .conftest import FILE_CONTENT, OneUserTest + + +class TestOfflineChangesSync(OneUserTest): + def setUp(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + self.local = self.local_1 + self.remote = self.remote_document_client_1 + + # Create a folder and a file on the server + self.folder1_remote = self.remote.make_folder("/", "Folder1") + self.file1_remote = self.remote.make_file( + self.folder1_remote, "File1.txt", FILE_CONTENT + ) + self.wait_sync(wait_for_async=True) + + """ + def test_copy_paste_when_engine_suspended(self): + "" + Copy paste and a rename operation together on same file while Drive is + offline should be detected and synced to server as soon as Drive comes + back online. + "" + self.copy_past_and_rename(stop_engine=True) + """ + + @pytest.mark.randombug("Unstable on Windows", condition=WINDOWS) + def test_copy_paste_normal(self): + """ + Copy paste and a rename operation together on same file while Drive is + online should be detected and synced to server. 
+ """ + self.copy_past_and_rename() + + def copy_past_and_rename(self, stop_engine: bool = False): + if stop_engine: + # Make Drive offline (by suspend) + self.engine_1.suspend() + + # Make a copy of the file (with xattr included) + self.local_1.copy("/Folder1/File1.txt", "/Folder1/File1 - Copy.txt") + + # Rename the original file + self.local.rename("/Folder1/File1.txt", "File1_renamed.txt") + + if stop_engine: + # Bring Drive online (by resume) + self.engine_1.resume() + + self.wait_sync() + + # Verify there is no local changes + assert self.local.exists("/Folder1/File1_renamed.txt") + assert self.local.exists("/Folder1/File1 - Copy.txt") + assert not self.local.exists("/Folder1/File1.txt") + + # Verify that local changes are uploaded to server successfully + if self.remote.exists("/Folder1/File1 - Copy.txt"): + # '/Folder1/File1 - Copy.txt' is uploaded to server. + # So original file named should be changed as 'File_renamed.txt' + remote_info = self.remote.get_info(self.file1_remote) + assert remote_info.name == "File1 - Copy.txt" + + else: + # Original file is renamed as 'File1 - Copy.txt'. + # This is a bug only if Drive is online during copy + rename + assert self.remote.exists("/Folder1/File1_renamed.txt") + remote_info = self.remote.get_info(self.file1_remote) + assert remote_info.name == "File1_renamed.txt" + + assert not self.remote.exists("/Folder1/File1.txt") diff --git a/tests/functional/test_local_copy_paste.py b/tests/functional/test_local_copy_paste.py new file mode 100644 index 0000000000..eedb717ee2 --- /dev/null +++ b/tests/functional/test_local_copy_paste.py @@ -0,0 +1,131 @@ +import shutil + +from .conftest import FILE_CONTENT, OneUserTest + + +class TestLocalCopyPaste(OneUserTest): + NUMBER_OF_LOCAL_TEXT_FILES = 10 + NUMBER_OF_LOCAL_IMAGE_FILES = 10 + NUMBER_OF_LOCAL_FILES_TOTAL = ( + NUMBER_OF_LOCAL_TEXT_FILES + NUMBER_OF_LOCAL_IMAGE_FILES + ) + FILE_NAME_PATTERN = "file%03d%s" + + """ + 1. Create folder "/A" with 100 files in it + 2. Create folder "/B" + """ + + def setUp(self): + remote = self.remote_1 + local = self.local_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + assert local.exists("/") + + # create folder A + local.make_folder("/", "A") + self.folder_path_1 = "/A" + + # create folder B + # NXDRIVE-477 If created after files are created inside A, + # creation of B isn't detected wy Watchdog! + # Reproducible with watchdemo, need to investigate. + # That's why we are now using local scan for setup_method(). 
+ local.make_folder("/", "B") + self.folder_path_2 = "/B" + + # add text files in folder 'Nuxeo Drive Test Workspace/A' + self.local_files_list = [] + for file_num in range(1, self.NUMBER_OF_LOCAL_TEXT_FILES + 1): + filename = self.FILE_NAME_PATTERN % (file_num, ".txt") + local.make_file(self.folder_path_1, filename, FILE_CONTENT) + self.local_files_list.append(filename) + + # add image files in folder 'Nuxeo Drive Test Workspace/A' + abs_folder_path_1 = local.abspath(self.folder_path_1) + test_doc_path = self.location / "resources" / "files" / "cat.jpg" + for file_num in range( + self.NUMBER_OF_LOCAL_TEXT_FILES + 1, self.NUMBER_OF_LOCAL_FILES_TOTAL + 1 + ): + filename = self.FILE_NAME_PATTERN % (file_num, ".jpg") + dst_path = abs_folder_path_1 / filename + shutil.copyfile(test_doc_path, dst_path) + self.local_files_list.append(filename) + + self.engine_1.start() + self.wait_sync() + self.engine_1.stop() + + # get remote folders reference ids + self.remote_ref_1 = local.get_remote_id(self.folder_path_1) + assert self.remote_ref_1 + self.remote_ref_2 = local.get_remote_id(self.folder_path_2) + assert self.remote_ref_2 + assert remote.fs_exists(self.remote_ref_1) + assert remote.fs_exists(self.remote_ref_2) + + assert ( + len(remote.get_fs_children(self.remote_ref_1)) + == self.NUMBER_OF_LOCAL_FILES_TOTAL + ) + + def test_local_copy_paste_files(self): + self._local_copy_paste_files() + + """ + def test_local_copy_paste_files_stopped(self): + self._local_copy_paste_files(stopped=True) + """ + + def _local_copy_paste_files(self, stopped=False): + if not stopped: + self.engine_1.start() + + # Copy all children (files) of A to B + remote = self.remote_1 + local = self.local_1 + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + num = self.NUMBER_OF_LOCAL_FILES_TOTAL + expected_files = set(self.local_files_list) + + for f in src.iterdir(): + shutil.copy(f, dst) + + if stopped: + self.engine_1.start() + self.wait_sync(timeout=60) + + # Expect local "/A" to contain all the files + abs_folder_path_1 = local.abspath(self.folder_path_1) + assert abs_folder_path_1.exists() + children = [f.name for f in abs_folder_path_1.iterdir()] + assert len(children) == num + assert set(children) == expected_files + + # expect local "/B" to contain the same files + abs_folder_path_2 = local.abspath(self.folder_path_2) + assert abs_folder_path_2.exists() + children = [f.name for f in abs_folder_path_2.iterdir()] + assert len(children) == num + assert set(children) == expected_files + + # expect remote "/A" to contain all the files + # just compare the names + children = [ + remote_info.name + for remote_info in remote.get_fs_children(self.remote_ref_1) + ] + assert len(children) == num + assert set(children) == expected_files + + # expect remote "/B" to contain all the files + # just compare the names + children = [ + remote_info.name + for remote_info in remote.get_fs_children(self.remote_ref_2) + ] + assert len(children) == num + assert set(children) == expected_files diff --git a/tests/functional/test_local_creations.py b/tests/functional/test_local_creations.py new file mode 100644 index 0000000000..e39d0d4de7 --- /dev/null +++ b/tests/functional/test_local_creations.py @@ -0,0 +1,158 @@ +import shutil +import time +from pathlib import Path +from unittest.mock import patch + +from nxdrive.constants import MAC, WINDOWS + +from .. 
import ensure_no_exception + from .conftest import SYNC_ROOT_FAC_ID, OneUserTest + + +class TestLocalCreations(OneUserTest): + def test_mini_scenario(self): + local = self.local_root_client_1 + remote = self.remote_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + local.make_folder(f"/{self.workspace_title}", "A") + folder_path_1 = f"{self.workspace_title}/A" + + test_doc_path = self.location / "resources" / "files" / "cat.jpg" + abs_folder_path_1 = local.abspath(f"/{folder_path_1}") + dst_path = abs_folder_path_1 / "cat.jpg" + shutil.copyfile(test_doc_path, dst_path) + + self.wait_sync(timeout=100) + uid = local.get_remote_id(f"/{folder_path_1}/cat.jpg") + assert remote.fs_exists(uid) + + def test_local_modification_date(self): + """Check that the files have the platform modification date.""" + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + + filename = "abc.txt" + remote.make_file("/", filename, content=b"1234") + remote_mtime = time.time() + + time.sleep(3) + + engine.start() + self.wait_sync(wait_for_async=True) + + filename = f"/{filename}" + assert local.exists(filename) + assert local.abspath(filename).stat().st_mtime < remote_mtime + + def test_local_creation_date(self): + """Check that the files have the platform creation date.""" + remote = self.remote_1 + local = self.local_1 + engine = self.engine_1 + sleep_time = 3 + + workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + filename = "abc.txt" + file_id = remote.make_file(workspace_id, filename, content=b"1234").uid + after_ctime = time.time() + + time.sleep(sleep_time) + filename = f"a{filename}" + remote.rename(file_id, filename) + after_mtime = time.time() + + engine.start() + self.wait_sync(wait_for_async=True) + + filename = f"/{filename}" + assert local.exists(filename) + stats = local.abspath(filename).stat() + local_mtime = stats.st_mtime + + # Note: GNU/Linux does not have a creation time + if MAC or WINDOWS: + local_ctime = stats.st_birthtime if MAC else stats.st_ctime + assert local_ctime < after_ctime + assert local_ctime + sleep_time <= local_mtime + + assert local_mtime < after_mtime + 0.5 + + def recovery_scenario(self, cleanup: bool = True): + """ + A recovery test. Scenario: + 1. Add a new account using the foo folder. + 2. Remove the account, keep the foo folder as-is. + 3. Remove xattrs using the clean-folder CLI argument (if *cleanup* is True). + 4. Re-add the account using the foo folder. + + The goal is to check that local data is not re-downloaded at all. + Drive should simply recreate the database and check that all files are there.
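+ Step 3 is emulated in-code rather than through the CLI, using the same + helpers the test calls below: + self.local_1.clean_xattr_folder_recursive(folder, cleanup=True) + self.local_1.remove_remote_id(folder, cleanup=True)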
+ """ + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Create folders and files on the server + workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + folder_uid = self.remote_1.make_folder(workspace_id, "a folder").uid + self.remote_1.make_file(folder_uid, "file1.bin", content=b"0321" * 42) + self.remote_1.make_file(folder_uid, "file2.bin", content=b"12365" * 42) + self.remote_1.make_folder(folder_uid, "folder 2") + + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Local checks + assert self.local_1.exists("/a folder") + assert self.local_1.exists("/a folder/file1.bin") + assert self.local_1.exists("/a folder/file2.bin") + assert self.local_1.exists("/a folder/folder 2") + + # Stop the engine for following actions + self.engine_1.stop() + + if cleanup: + # Remove xattrs + folder = Path("a folder") + self.local_1.clean_xattr_folder_recursive(folder, cleanup=True) + self.local_1.remove_remote_id(folder, cleanup=True) + + # Ensure xattrs are gone + assert not self.local_1.get_remote_id(folder) + assert not self.local_1.get_remote_id(folder / "file1.bin") + assert not self.local_1.get_remote_id(folder / "file2.bin") + assert not self.local_1.get_remote_id(folder / "folder 2") + + # Destroy the database but keep synced files + self.unbind_engine(1, purge=False) + + def download(*_, **__): + """ + Patch Remote.download() to be able to check that nothing + will be downloaded as local data is already there. + """ + assert 0, "No download should be done!" + + # Re-bind the account using the same folder + self.bind_engine(1, start_engine=False) + + # Start the sync + with patch.object(self.engine_1.remote, "download", new=download): + with ensure_no_exception(): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # No error expected + assert not self.engine_1.dao.get_errors(limit=0) + + # Checks + for client in (self.local_1, self.remote_1): + assert client.exists("/a folder") + assert client.exists("/a folder/file1.bin") + assert client.exists("/a folder/file2.bin") + assert client.exists("/a folder/folder 2") diff --git a/tests/functional/test_local_deletion.py b/tests/functional/test_local_deletion.py new file mode 100644 index 0000000000..39bd8faa92 --- /dev/null +++ b/tests/functional/test_local_deletion.py @@ -0,0 +1,309 @@ +import shutil + +import pytest + +from nxdrive.constants import WINDOWS + +from .conftest import OneUserTest + + +class TestLocalDeletion(OneUserTest): + def setUp(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + def test_untrash_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + local.make_file("/", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists("/" + file1) + + old_info = remote.get_info(f"/{file1}") + abs_path = local.abspath(f"/{file1}") + + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync(wait_for_async=True) + assert not remote.exists("/" + file1) + assert not local.exists("/" + file1) + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) + self.wait_sync(wait_for_async=True) + assert remote.exists(old_info.uid) + assert local.exists("/" + file1) + + def test_untrash_file_with_rename(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + file2 = 
"File_To_Delete2.txt" + + local.make_file("/", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(f"/{file1}") + uid = local.get_remote_id(f"/{file1}") + old_info = remote.get_info(f"/{file1}") + abs_path = local.abspath(f"/{file1}") + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file2) + self.wait_sync(wait_for_async=True) + assert not remote.exists("/" + file1) + assert not local.exists("/" + file1) + (self.local_test_folder_1 / file2).write_bytes(b"New content") + if WINDOWS: + # Python API overwrite the tag by default + (self.local_test_folder_1 / f"{file2}:ndrive").write_text( + uid, encoding="utf-8" + ) + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file2, local.abspath("/")) + self.wait_sync(wait_for_async=True) + assert remote.exists(old_info.uid) + assert local.exists("/" + file2) + assert not local.exists("/" + file1) + assert local.get_content("/" + file2) == b"New content" + + def test_move_untrash_file_on_parent(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + local.delete("/ToDelete") + self.wait_sync() + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) + self.wait_sync() + new_info = remote.get_info(old_info.uid) + assert new_info.state == "project" + assert local.exists(f"/{file1}") + # Because remote_document_client_1 was used + assert local.get_remote_id("/").endswith(new_info.parent_uid) + + """ + @Options.mock() + def test_move_untrash_file_on_parent_with_no_rights(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + # Setup + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + + # Remove rights + folder_path = f"{self.ws.path}/ToDelete" + input_obj = "doc:" + folder_path + self.root_remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=self.user_1, + permission="Read", + ) + self.root_remote.block_inheritance(folder_path, overwrite=False) + self.root_remote.delete(folder_path) + self.wait_sync(wait_for_async=True) + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) + assert local.get_remote_id("/" + file1) + self.wait_sync() + assert local.exists("/" + file1) + new_uid = local.get_remote_id("/" + file1) + # Because remote_document_client_1 was used + assert new_uid + assert not new_uid.endswith(old_info.uid) + """ + + @pytest.mark.skip( + reason="Wait to know what is the expectation " + "- the previous folder does not exist" + ) + def 
test_move_untrash_file_on_parent_with_no_rights_on_destination(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + # Setup the test + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_folder("/", "ToCopy") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + remote.get_info(file_path) + abs_path = local.abspath(file_path) + + # Pretend we have trashed the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + + # Remove rights + folder_path = f"{self.ws.path}/ToCopy" + input_obj = "doc:" + folder_path + self.root_remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=self.user_1, + permission="Read", + ) + self.root_remote.block_inheritance(folder_path, overwrite=False) + # Delete + local.delete("/ToDelete") + self.wait_sync(wait_for_async=True) + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it is untrashed or left unsynchronized + local.unlock_ref("/ToCopy") + shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToCopy")) + self.wait_sync(wait_for_async=True) + + """ + def test_untrash_file_on_delete_parent(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + # Setup + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + + # Pretend we have trashed the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + local.delete("/ToDelete") + self.wait_sync() + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it is untrashed or recreated + local.make_folder("/", "ToDelete") + shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToDelete")) + self.wait_sync() + assert remote.exists(old_info.uid) + new_info = remote.get_info(old_info.uid) + assert remote.exists(new_info.parent_uid) + assert local.exists(file_path) + """ + + def test_trash_file_then_parent(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + # Pretend we have trashed the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + local.delete("/ToDelete") + self.wait_sync() + assert not remote.exists(file_path) + assert not local.exists(file_path) + # See if it is untrashed or recreated + local.make_folder("/", "ToDelete") + shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToDelete")) + self.wait_sync() + assert remote.exists(old_info.uid) + assert local.exists(file_path) + + """ + @Options.mock() + def test_trash_file_should_respect_deletion_behavior_unsync(self): + Options.deletion_behavior = "unsync" + + local, engine = self.local_1, self.engine_1 + remote = self.remote_document_client_1 + folder, file = "folder", "file.txt" + file_path = f"/{folder}/{file}" + + # Create local data + local.make_folder("/", folder) + local.make_file(f"/{folder}", file, content=b"This is a content") + + # Sync'n check +
self.wait_sync() + assert remote.exists(file_path) + + # Mimic "stop Drive" + engine.stop() + + # Delete the file + local.delete(file_path) + + # Mimic "start Drive" + engine.start() + self.wait_sync() + + # Checks + assert remote.exists(file_path) + assert not local.exists(file_path) + """ + + """ + @Options.mock() + def test_trash_file_should_respect_deletion_behavior_delete_server(self): + Options.deletion_behavior = "delete_server" + + local, engine = self.local_1, self.engine_1 + remote = self.remote_document_client_1 + folder, file = "folder", "file.txt" + file_path = f"/{folder}/{file}" + + # Create local data + local.make_folder("/", folder) + local.make_file(f"/{folder}", file, content=b"This is a content") + + # Sync'n check + self.wait_sync() + assert remote.exists(file_path) + + # Mimic "stop Drive" + engine.stop() + + # Delete the file + local.delete(file_path) + + # Mimic "start Drive" + engine.start() + self.wait_sync() + + # Checks + assert not remote.exists(file_path) + assert not local.exists(file_path) + """ diff --git a/tests/functional/test_local_filter.py b/tests/functional/test_local_filter.py new file mode 100644 index 0000000000..69b992e5cd --- /dev/null +++ b/tests/functional/test_local_filter.py @@ -0,0 +1,198 @@ +from nxdrive.constants import SYNC_ROOT + +from .conftest import FS_ITEM_ID_PREFIX, SYNC_ROOT_FAC_ID, OneUserTest + + +class TestLocalFilter(OneUserTest): + def test_synchronize_local_filter(self): + """Test that filtering remote documents is impacted client side + + Just do a single test as it is the same as + test_integration_remote_deletion + + Use cases: + - Filter delete a regular folder + => Folder should be locally deleted + - Unfilter restore folder from the trash + => Folder should be locally re-created + - Filter a synchronization root + => Synchronization root should be locally deleted + - Unfilter synchronization root from the trash + => Synchronization root should be locally re-created + + See TestIntegrationSecurityUpdates.test_synchronize_denying_read_access + as the same uses cases are tested + """ + # Bind the server and root workspace + self.engine_1.start() + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents in the remote root workspace + # then synchronize + remote.make_folder("/", "Test folder") + remote.make_file("/Test folder", "joe.txt", content=b"Some content") + self.wait_sync(wait_for_async=True) + # Fake server binding with the unit test class + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + # Add remote folder as filter then synchronize + doc = remote.get_info("/Test folder") + root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" + doc_path = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}" + + self.engine_1.add_filter(doc_path) + self.wait_sync() + assert not local.exists("/Test folder") + + self.engine_1.remove_filter(doc_path) + self.wait_sync() + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + self.engine_1.add_filter(doc_path) + self.wait_sync() + assert not local.exists("/Test folder") + + # Delete sync root then synchronize + self.engine_1.add_filter(root_path) + self.wait_sync() + assert not local.exists("/") + + # Restore sync root from trash then synchronize + self.engine_1.remove_filter(root_path) + self.wait_sync() + assert local.exists("/") + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + """ + def 
test_synchronize_local_office_temp(self): + # A local folder with a hexadecimal name should be synchronized directly + # Bind the server and root workspace + hexaname = "1234ABCD" + hexafile = "2345BCDF" + self.engine_1.start() + self.wait_sync() + self.local_1.make_folder("/", hexaname) + self.local_1.make_file("/", hexafile, content=b"test") + # Make sure that the folder is synchronized directly + # no matter what, while the file is postponed + self.wait_sync(enforce_errors=False, fail_if_timeout=False) + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + + # Force the postponed file to ensure it is synchronized now + self.engine_1.queue_manager.requeue_errors() + self.wait_sync(wait_for_async=True) + assert self.local_1.exists("/" + hexafile) + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 2 + assert children[1].name == "2345BCDF" + """ + + """ + def test_synchronize_local_filter_with_move(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents in the remote root workspace + # then synchronize + remote.make_folder("/", "Test") + remote.make_file("/Test", "joe.txt", content=b"Some content") + remote.make_folder("/Test", "Subfolder") + remote.make_folder("/Test", "Filtered") + remote.make_file("/Test/Subfolder", "joe2.txt", content=b"Some content") + remote.make_file("/Test/Subfolder", "joe3.txt", content=b"Somecossntent") + remote.make_folder("/Test/Subfolder/", "SubSubfolder") + remote.make_file( + "/Test/Subfolder/SubSubfolder", "joe4.txt", content=b"Some qwqwqontent" + ) + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/Test") + assert local.exists("/Test/joe.txt") + assert local.exists("/Test/Filtered") + assert local.exists("/Test/Subfolder") + assert local.exists("/Test/Subfolder/joe2.txt") + assert local.exists("/Test/Subfolder/joe3.txt") + assert local.exists("/Test/Subfolder/SubSubfolder") + assert local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") + + # Add remote folder as filter then synchronize + doc_file = remote.get_info("/Test/joe.txt") + doc = remote.get_info("/Test") + filtered_doc = remote.get_info("/Test/Filtered") + root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" + doc_path_filtered = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}/{FS_ITEM_ID_PREFIX}{filtered_doc.uid}" + + self.engine_1.add_filter(doc_path_filtered) + self.wait_sync() + assert not local.exists("/Test/Filtered") + + # Move joe.txt to the filtered folder on the server + remote.move(doc_file.uid, filtered_doc.uid) + self.wait_sync(wait_for_async=True) + + # It is now deleted on the client + assert not local.exists("/Test/joe.txt") + assert local.exists("/Test/Subfolder") + assert local.exists("/Test/Subfolder/joe2.txt") + assert local.exists("/Test/Subfolder/joe3.txt") + assert local.exists("/Test/Subfolder/SubSubfolder") + assert local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") + + # Now move the subfolder + doc_file = remote.get_info("/Test/Subfolder") + remote.move(doc_file.uid, filtered_doc.uid) + self.wait_sync(wait_for_async=True) + + # Check that everything has been deleted + assert not local.exists("/Test/joe.txt") + assert not local.exists("/Test/Subfolder") + assert not local.exists("/Test/Subfolder/joe2.txt") + assert not local.exists("/Test/Subfolder/joe3.txt") + assert not local.exists("/Test/Subfolder/SubSubfolder") + assert not local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") + """ + + """ + def
test_synchronize_local_filter_with_remote_trash(self): + self.engine_1.start() + + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents in the remote root workspace + # then synchronize + folder_id = remote.make_folder("/", "Test") + remote.make_file("/Test", "joe.txt", content=b"Some content") + + self.wait_sync(wait_for_async=True) + assert local.exists("/Test") + assert local.exists("/Test/joe.txt") + + # Add remote folder as filter then synchronize + doc = remote.get_info("/Test") + root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" + doc_path = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}" + + self.engine_1.add_filter(doc_path) + self.wait_sync() + assert not local.exists("/Test") + + # Delete remote folder then synchronize + remote.delete("/Test") + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test") + + # Restore folder from trash then synchronize + remote.undelete(folder_id) + # NXDRIVE-xx check that the folder is not created as it is filtered + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test") + """ diff --git a/tests/functional/test_local_move_and_rename.py b/tests/functional/test_local_move_and_rename.py new file mode 100644 index 0000000000..0781eaf5a3 --- /dev/null +++ b/tests/functional/test_local_move_and_rename.py @@ -0,0 +1,702 @@ +import shutil +import time +from unittest.mock import patch + +from nuxeo.exceptions import HTTPError + +from . import LocalTest +from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest + +# TODO NXDRIVE-170: refactor + + +class TestLocalMoveAndRename(OneUserTest): + def setUp(self): + """ + Sets up the following local hierarchy: + Nuxeo Drive Test Workspace + |-- Original File 1.txt + |-- Original File 2.txt + |-- Original Folder 1 + | |-- Sub-Folder 1.1 + | |-- Sub-Folder 1.2 + | |-- Original File 1.1.txt + |-- Original Folder 2 + | |-- Original File 3.txt + """ + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + local = self.local_1 + local.make_file("/", "Original File 1.txt", content=b"Some Content 1") + local.make_file("/", "Original File 2.txt", content=b"Some Content 2") + + local.make_folder("/", "Original Folder 1") + local.make_folder("/Original Folder 1", "Sub-Folder 1.1") + local.make_folder("/Original Folder 1", "Sub-Folder 1.2") + + # Same content as OF1 + local.make_file( + "/Original Folder 1", "Original File 1.1.txt", content=b"Some Content 1" + ) + + local.make_folder("/", "Original Folder 2") + local.make_file( + "/Original Folder 2", "Original File 3.txt", content=b"Some Content 3" + ) + self.wait_sync() + + """ + def test_local_rename_folder_while_creating(self): + local = self.local_1 + root_local = self.local_root_client_1 + remote = self.remote_document_client_1 + marker = False + + def update_remote_state(row, *args, **kwargs): + nonlocal marker + EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) + if row.local_name == "New Folder" and not marker: + root_local.rename(row.local_path, "Renamed Folder") + marker = True + + with patch.object( + self.engine_1.dao, "update_remote_state", new=update_remote_state + ): + local.make_folder("/", "New Folder") + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed Folder") + assert not local.exists("/New Folder") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed Folder") + assert info.name == "Renamed Folder" + assert len(local.get_children_info("/")) == 5 + assert 
len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + def test_local_rename_file_while_creating(self): + local = self.engine_1.local + remote = self.remote_document_client_1 + marker = False + + def set_remote_id(ref: Path, remote_id: bytes, name: str = "ndrive"): + nonlocal local, marker + LocalTest.set_remote_id(local, ref, remote_id, name=name) + if not marker and ref.name == "File.txt": + local.rename(ref, "Renamed File.txt") + marker = True + + with patch.object(self.engine_1.local, "set_remote_id", new=set_remote_id): + self.local_1.make_file("/", "File.txt", content=b"Some Content 2") + self.wait_sync(fail_if_timeout=False) + + local = self.local_1 + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed File.txt") + assert info.name == "Renamed File.txt" + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + @pytest.mark.randombug("NXDRIVE-811", condition=True, mode="REPEAT") + def test_local_rename_file_while_creating_before_marker(self): + local = self.local_1 + remote = self.remote_document_client_1 + marker = False + + def set_remote_id(ref: Path, remote_id: bytes, name: str = "ndrive"): + nonlocal local, marker + if not marker and ref.name == "File.txt": + self.engine_1.local.rename(ref, "Renamed File.txt") + marker = True + LocalTest.set_remote_id(local, ref, remote_id, name=name) + + with patch.object(self.engine_1.local, "set_remote_id", new=set_remote_id): + local.make_file("/", "File.txt", content=b"Some Content 2") + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed File.txt") + assert info.name == "Renamed File.txt" + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + def test_local_rename_file_while_creating_after_marker(self): + marker = False + local = self.local_1 + remote = self.remote_document_client_1 + + def update_remote_state(row, *args, **kwargs): + nonlocal marker + EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) + if not marker and row.local_name == "File.txt": + self.engine_1.local.rename(row.local_path, "Renamed File.txt") + marker = True + + with patch.object( + self.engine_1.dao, "update_remote_state", new=update_remote_state + ): + local.make_file("/", "File.txt", content=b"Some Content 2") + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed File.txt") + assert info.name == "Renamed File.txt" + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + def test_replace_file(self): + local = self.local_1 + + # Rename /Original File 1.txt to /Renamed File 1.txt + uid = local.get_remote_id("/Original File 1.txt") + local.remove_remote_id("/Original File 1.txt") + local.update_content("/Original File 1.txt", b"plop") + self.wait_sync(fail_if_timeout=False) + assert local.get_remote_id("/Original File 1.txt") == uid + + def test_local_rename_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Rename /Original File 1.txt to /Renamed File 1.txt + uid_1 = remote.get_info("/Original File 1.txt").uid + local.rename("/Original File 1.txt", 
"Renamed File 1.txt") + assert not local.exists("/Original File 1.txt") + assert local.exists("/Renamed File 1.txt") + + self.wait_sync() + assert not local.exists("/Original File 1.txt") + assert local.exists("/Renamed File 1.txt") + assert remote.get_info(uid_1).name == "Renamed File 1.txt" + + # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' + # and 'Original File 1.1.txt' to + # 'Renamed File 1.1.txt' at the same time as they share + # the same digest but do not live in the same folder + uid_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + local.rename( + "/Original Folder 1/Original File 1.1.txt", "Renamed File 1.1 \xe9.txt" + ) + assert not local.exists("/Original Folder 1/Original File 1.1.txt") + assert local.exists("/Original Folder 1/Renamed File 1.1 \xe9.txt") + local.rename("/Renamed File 1.txt", "Renamed Again File 1.txt") + assert not local.exists("/Renamed File 1.txt") + assert local.exists("/Renamed Again File 1.txt") + + self.wait_sync() + assert not local.exists("/Renamed File 1.txt") + assert local.exists("/Renamed Again File 1.txt") + assert not local.exists("/Original Folder 1/Original File 1.1.txt") + assert local.exists("/Original Folder 1/Renamed File 1.1 \xe9.txt") + + info_1 = remote.get_info(uid_1) + assert info_1.name == "Renamed Again File 1.txt" + + # User 1 does not have the rights to see the parent container + # of the test workspace, hence set fetch_parent_uid=False + parent_1 = remote.get_info(info_1.parent_uid, fetch_parent_uid=False) + assert parent_1.name == self.workspace_title + + info_1_1 = remote.get_info(uid_1_1) + assert info_1_1.name == "Renamed File 1.1 \xe9.txt" + + parent_1_1 = remote.get_info(info_1_1.parent_uid) + assert parent_1_1.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 3 + assert len(remote.get_children_info(info_1_1.parent_uid)) == 3 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + + """ + def test_local_rename_file_uppercase_stopped(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.stop() + + # Rename /Original File 1.txt to /Renamed File 1.txt + + # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' + # and 'Original File 1.1.txt' to + # 'Renamed File 1.1.txt' at the same time as they share + # the same digest but do not live in the same folder + uid = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + local.rename( + "/Original Folder 1/Original File 1.1.txt", "original File 1.1.txt" + ) + + self.engine_1.start() + self.wait_sync() + + info = remote.get_info(uid) + assert info.name == "original File 1.1.txt" + + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 3 + assert len(remote.get_children_info(info.parent_uid)) == 3 + """ + + def test_local_rename_file_uppercase(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Rename /Original File 1.txt to /Renamed File 1.txt + + # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' + # and 'Original File 1.1.txt' to + # 'Renamed File 1.1.txt' at the same time as they share + # the same digest but do not live in the same folder + uid = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + local.rename( + "/Original Folder 1/Original File 1.1.txt", "original File 1.1.txt" + ) + + self.wait_sync() + + info = remote.get_info(uid) + assert 
info.name == "original File 1.1.txt" + + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 3 + assert len(remote.get_children_info(info.parent_uid)) == 3 + + def test_local_move_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # "/Original File 1.txt" -> "/Original Folder 1/Original File 1.txt" + uid = remote.get_info("/Original File 1.txt").uid + local.move("/Original File 1.txt", "/Original Folder 1") + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Original File 1.txt") + + self.wait_sync() + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Original File 1.txt") + + info = remote.get_info(uid) + assert info.name == "Original File 1.txt" + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 4 + assert len(remote.get_children_info(info.parent_uid)) == 4 + assert len(local.get_children_info("/")) == 3 + assert len(remote.get_children_info(self.workspace)) == 3 + + """ + def test_local_move_file_rollback(self): + ""Test a local move into a folder that is not allowed on the server, + in which case we locally revert/cancel the move. + Sometimes the rollback itself is canceled because the doc pair has + no remote name. The cause is not yet known. + We would then end on such errors (see NXDRIVE-1952): + + # Nuxeo Drive <= 4.2.0 + AttributeError: 'NoneType' object has no attribute 'rstrip' + File "engine/processor.py", line 1383, in _handle_failed_remote_rename + File "client/local_client.py", line 629, in rename + File "utils.py", line 569, in safe_os_filename + File "utils.py", line 555, in safe_filename + + Or even: + + # Nuxeo Drive > 4.2.0 + TypeError: expected string or bytes-like object + File "engine/processor.py", line 1462, in _handle_failed_remote_rename + File "client/local/base.py", line 458, in rename + File "utils.py", line 622, in safe_os_filename + File "utils.py", line 607, in safe_filename + File ".../re.py", line 192, in sub + "" + local = self.local_1 + + # Move "/Original File 1.txt" -> "/Original Folder 1/Original File 1.txt" + local.move("/Original File 1.txt", "/Original Folder 1") + # And change the file name too + local.rename( + "/Original Folder 1/Original File 1.txt", "Original File 1-ren.txt" + ) + # Checks + assert not local.exists("/Original File 1.txt") + assert not local.exists("/Original Folder 1/Original File 1.txt") + assert local.exists("/Original Folder 1/Original File 1-ren.txt") + + def rename(*args, **kwargs): + raise ValueError("Mock'ed rename error") + + def allow_rollback(*args, **kwargs): + ""Allow rollback on all OSes."" + return True + + with patch.object(self.engine_1.remote, "rename", new=rename): + with patch.object(self.engine_1, "local_rollback", new=allow_rollback): + with ensure_no_exception(): + self.wait_sync() + + # The rename has been rolled back (the move itself is kept) + assert not local.exists("/Original File 1.txt") + assert not local.exists("/Original File 1-ren.txt") + assert not local.exists("/Original Folder 1/Original File 1-ren.txt") + assert local.exists("/Original Folder 1/Original File 1.txt") + assert not self.engine_1.dao.get_errors(limit=0) + """ + + def test_local_move_and_rename_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Move and rename /Original File 1.txt to /Original Folder 1/Renamed File 1 \xe9.txt
uid = remote.get_info("/Original File 1.txt").uid + + local.move( + "/Original File 1.txt", "/Original Folder 1", name="Renamed File 1 \xe9.txt" + ) + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Renamed File 1 \xe9.txt") + + self.wait_sync() + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Renamed File 1 \xe9.txt") + + info = remote.get_info(uid) + assert info.name == "Renamed File 1 \xe9.txt" + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 4 + assert len(remote.get_children_info(info.parent_uid)) == 4 + assert len(local.get_children_info("/")) == 3 + assert len(remote.get_children_info(self.workspace)) == 3 + + def test_local_rename_folder(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to renaming + folder_1 = remote.get_info("/Original Folder 1").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid + + # Rename a non-empty folder with some content + local.rename("/Original Folder 1", "Renamed Folder 1 \xe9") + assert not local.exists("/Original Folder 1") + assert local.exists("/Renamed Folder 1 \xe9") + + # Synchronize: only the folder renaming is detected: all + # the descendants are automatically realigned + self.wait_sync() + + # The server folder has been renamed: the uid stays the same + assert remote.get_info(folder_1).name == "Renamed Folder 1 \xe9" + + # The content of the renamed folder is left unchanged + file_info = remote.get_info(file_1_1) + assert file_info.name == "Original File 1.1.txt" + assert file_info.parent_uid == folder_1 + + folder_info = remote.get_info(folder_1_1) + assert folder_info.name == "Sub-Folder 1.1" + assert folder_info.parent_uid == folder_1 + + assert len(local.get_children_info("/Renamed Folder 1 \xe9")) == 3 + assert len(remote.get_children_info(file_info.parent_uid)) == 3 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + + """ + def test_local_rename_folder_while_suspended(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to renaming + folder_1 = remote.get_info("/Original Folder 1").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid + count = len(local.get_children_info("/Original Folder 1")) + self.engine_1.suspend() + + # Rename a non-empty folder with some content + local.rename("/Original Folder 1", "Renamed Folder 1 \xe9") + assert not local.exists("/Original Folder 1") + assert local.exists("/Renamed Folder 1 \xe9") + + local.rename("/Renamed Folder 1 \xe9/Sub-Folder 1.1", "Sub-Folder 2.1") + assert local.exists("/Renamed Folder 1 \xe9/Sub-Folder 2.1") + + # Same content as OF1 + local.make_file("/Renamed Folder 1 \xe9", "Test.txt", content=b"Some Content 1") + count += 1 + self.engine_1.resume() + # Synchronize: only the folder renaming is detected: all + # the descendants are automatically realigned + self.wait_sync(wait_for_async=True) + + # The server folder has been renamed: the uid stays the same + assert remote.get_info(folder_1).name == "Renamed Folder 1 \xe9" + + # The file is left unchanged while the sub-folder rename is applied + 
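# (Both renames were done while suspended: one resume/sync pass must replay them.) + 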
file_info = remote.get_info(file_1_1) + assert file_info.name == "Original File 1.1.txt" + assert file_info.parent_uid == folder_1 + + folder_info = remote.get_info(folder_1_1) + assert folder_info.name == "Sub-Folder 2.1" + assert folder_info.parent_uid == folder_1 + assert len(local.get_children_info("/Renamed Folder 1 \xe9")) == count + assert len(remote.get_children_info(folder_1)) == count + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + """ + + """ + def test_local_rename_file_after_create(self): + # Office 2010 and newer create a tmp file with 8 chars + # and move it right after + local = self.local_1 + remote = self.remote_document_client_1 + + local.make_file("/", "File.txt", content=b"Some Content 2") + local.rename("/File.txt", "Renamed File.txt") + + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + # Path doesn't change on Nuxeo + assert local.get_remote_id("/Renamed File.txt") + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + def test_local_rename_file_after_create_detected(self): + # MS Office 2010+ creates a tmp file with 8 chars + # and moves it right after + local = self.local_1 + remote = self.remote_document_client_1 + marker = False + + def insert_local_state(info, parent_path): + nonlocal marker + if info.name == "File.txt" and not marker: + local.rename("/File.txt", "Renamed File.txt") + sleep(2) + marker = True + EngineDAO.insert_local_state(self.engine_1.dao, info, parent_path) + + with patch.object( + self.engine_1.dao, "insert_local_state", new=insert_local_state + ): + # Might be temporarily ignored once + self.engine_1.queue_manager._error_interval = 3 + local.make_file("/", "File.txt", content=b"Some Content 2") + sleep(10) + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path doesn't change on Nuxeo + assert local.get_remote_id("/Renamed File.txt") + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + def test_local_move_folder(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to move + folder_1 = remote.get_info("/Original Folder 1").uid + folder_2 = remote.get_info("/Original Folder 2").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid + + # Move a non-empty folder with some content + local.move("/Original Folder 1", "/Original Folder 2") + assert not local.exists("/Original Folder 1") + assert local.exists("/Original Folder 2/Original Folder 1") + + # Synchronize: only the folder move is detected: all + # the descendants are automatically realigned + self.wait_sync() + + # The server folder has been moved: the uid stays the same + # The parent folder is now folder 2 + assert remote.get_info(folder_1).parent_uid == folder_2 + + # The content of the moved folder is left unchanged + file_1_1_info = remote.get_info(file_1_1) + assert file_1_1_info.name == "Original File 1.1.txt" + assert file_1_1_info.parent_uid == folder_1 + + folder_1_1_info = remote.get_info(folder_1_1) + assert folder_1_1_info.name == "Sub-Folder 1.1" + assert folder_1_1_info.parent_uid == folder_1 + + assert len(local.get_children_info("/Original Folder 2/Original Folder 1")) 
== 3 + assert len(remote.get_children_info(folder_1)) == 3 + assert len(local.get_children_info("/")) == 3 + assert len(remote.get_children_info(self.workspace)) == 3 + + """ + def test_concurrent_local_rename_folder(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to renaming + folder_1 = remote.get_info("/Original Folder 1").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_2 = remote.get_info("/Original Folder 2").uid + file_3 = remote.get_info("/Original Folder 2/Original File 3.txt").uid + + # Rename two non-empty folders concurrently + local.rename("/Original Folder 1", "Renamed Folder 1") + local.rename("/Original Folder 2", "Renamed Folder 2") + assert not local.exists("/Original Folder 1") + assert local.exists("/Renamed Folder 1") + assert not local.exists("/Original Folder 2") + assert local.exists("/Renamed Folder 2") + + # Synchronize: only the folder renamings are detected: all + # the descendants are automatically realigned + self.wait_sync() + + # The server folders have been renamed: the uid stays the same + folder_1_info = remote.get_info(folder_1) + assert folder_1_info.name == "Renamed Folder 1" + + folder_2_info = remote.get_info(folder_2) + assert folder_2_info.name == "Renamed Folder 2" + + # The content of the folder has been left unchanged + file_1_1_info = remote.get_info(file_1_1) + assert file_1_1_info.name == "Original File 1.1.txt" + assert file_1_1_info.parent_uid == folder_1 + + file_3_info = remote.get_info(file_3) + assert file_3_info.name == "Original File 3.txt" + assert file_3_info.parent_uid == folder_2 + + assert len(local.get_children_info("/Renamed Folder 1")) == 3 + assert len(remote.get_children_info(folder_1)) == 3 + assert len(local.get_children_info("/Renamed Folder 2")) == 1 + assert len(remote.get_children_info(folder_2)) == 1 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + """ + + def test_local_replace(self): + local = LocalTest(self.local_test_folder_1) + remote = self.remote_document_client_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Create 2 files with the same name but different content + # in separate folders + local.make_file("/", "test.odt", content=b"Some content.") + local.make_folder("/", "folder") + shutil.copyfile( + self.local_test_folder_1 / "test.odt", + self.local_test_folder_1 / "folder" / "test.odt", + ) + local.update_content("/folder/test.odt", content=b"Updated content.") + + # Copy the newest file to the root workspace and synchronize it + sync_root = self.local_nxdrive_folder_1 / self.workspace_title + test_file = self.local_test_folder_1 / "folder" / "test.odt" + shutil.copyfile(test_file, sync_root / "test.odt") + self.wait_sync() + assert remote.exists("/test.odt") + assert remote.get_content("/test.odt") == b"Updated content." + + # Copy the oldest file to the root workspace and synchronize it. + # First wait a bit for file time stamps to increase enough. + time.sleep(OS_STAT_MTIME_RESOLUTION) + shutil.copyfile(self.local_test_folder_1 / "test.odt", sync_root / "test.odt") + self.wait_sync() + assert remote.exists("/test.odt") + assert remote.get_content("/test.odt") == b"Some content." + + """ + def test_local_rename_sync_root_folder(self): + # Use the Administrator to be able to introspect the container of the + # test workspace.
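+ # The admin credentials come from the test environment (env.NXDRIVE_TEST_*).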
+ remote = DocRemote( + self.nuxeo_url, + env.NXDRIVE_TEST_USERNAME, + "nxdrive-test-administrator-device", + self.version, + password=env.NXDRIVE_TEST_PASSWORD, + base_folder=self.workspace, + ) + folder_1_uid = remote.get_info("/Original Folder 1").uid + + # Create new clients to be able to introspect the test sync root + toplevel_local_client = LocalTest(self.local_nxdrive_folder_1) + + toplevel_local_client.rename( + Path(self.workspace_title), "Renamed Nuxeo Drive Test Workspace" + ) + self.wait_sync() + + workspace_info = remote.get_info(self.workspace) + assert workspace_info.name == "Renamed Nuxeo Drive Test Workspace" + + folder_1_info = remote.get_info(folder_1_uid) + assert folder_1_info.name == "Original Folder 1" + assert folder_1_info.parent_uid == self.workspace + assert len(remote.get_children_info(self.workspace)) == 4 + """ + + def test_local_move_with_remote_error(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Check local folder + assert local.exists("/Original Folder 1") + + # Simulate server error + bad_remote = self.get_bad_remote() + error = HTTPError(status=500, message="Mock server error") + bad_remote.make_server_call_raise(error) + + with patch.object(self.engine_1, "remote", new=bad_remote): + local.rename("/Original Folder 1", "OSErrorTest") + self.wait_sync(timeout=5, fail_if_timeout=False) + folder_1 = remote.get_info("/Original Folder 1") + assert folder_1.name == "Original Folder 1" + assert local.exists("/OSErrorTest") + + # Set the engine back online: from here on, the normal behavior is restored + self.engine_1.set_offline(value=False) + + self.wait_sync() + folder_1 = remote.get_info(folder_1.uid) + assert folder_1.name == "OSErrorTest" + assert local.exists("/OSErrorTest") + assert len(local.get_children_info("/OSErrorTest")) == 3 + assert len(remote.get_children_info(folder_1.uid)) == 3 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + + # TODO: implement me once canDelete is checked in the synchronizer + # def test_local_move_sync_root_folder(self): + # pass diff --git a/tests/functional/test_local_move_folders.py b/tests/functional/test_local_move_folders.py new file mode 100644 index 0000000000..b43ffee0fd --- /dev/null +++ b/tests/functional/test_local_move_folders.py @@ -0,0 +1,220 @@ +import shutil +from contextlib import suppress +from pathlib import Path + +from ..utils import random_png +from .conftest import OneUserTest + + +class TestLocalMoveFolders(OneUserTest): + def _setup(self, count: int = 10, wait_for_sync: bool = True): + """ + 1. Create folder a1 at the root + 2. Create folder a2 at the root + 3. Add *count* pictures in a1 + 4. 
Add *count* pictures in a2 + """ + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + + local = self.local_1 + remote = self.remote_1 + + # Create a1 and a2 + self.folder_path_1 = local.make_folder("/", "a1") + self.folder_path_2 = local.make_folder("/", "a2") + + names = {f"file{n + 1:03d}.png" for n in range(count)} + + for path in (self.folder_path_1, self.folder_path_2): + for name in names: + file_path = local.abspath(path) / name + random_png(file_path) + + self.engine_1.start() + + if wait_for_sync: + self.wait_sync(timeout=30, wait_win=True) + + # Check /a1 and /a2 + for folder in ("/a1", "/a2"): + # Check local files + assert local.exists(folder) + children = [child.name for child in local.get_children_info(folder)] + assert len(children) == count + assert set(children) == names + + if wait_for_sync: + # Check remote files + uid = local.get_remote_id(folder) + assert uid + assert remote.fs_exists(uid) + children = [child.name for child in remote.get_fs_children(uid)] + assert len(children) == count + assert set(children) == names + + def tearDown(self): + with suppress(TypeError, AttributeError): + self.engine_1._local_watcher.localScanFinished.disconnect( + self.app.local_scan_finished + ) + + def test_local_move_folder_with_files(self): + count = 10 + self._setup(count=count) + local = self.local_1 + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + shutil.move(src, dst) + self.wait_sync() + names = {f"file{n + 1:03d}.png" for n in range(count)} + + # Check that a1 doesn't exist anymore locally and remotely + assert not local.exists("/a1") + assert len(remote_doc.get_children_info(self.workspace)) == 1 + + # Check /a2 and /a2/a1 + for folder in ("/a2", "/a2/a1"): + assert local.exists(folder) + children = [ + child.name + for child in local.get_children_info(folder) + if not child.folderish + ] + assert len(children) == count + assert set(children) == names + + uid = local.get_remote_id(folder) + assert uid + assert remote.fs_exists(uid) + children = [ + child.name + for child in remote.get_fs_children(uid) + if not child.folderish + ] + assert len(children) == count + assert set(children) == names + + """ + def test_local_move_folder_both_sides_while_stopped(self): + self._test_local_move_folder_both_sides(False) + """ + + """ + def test_local_move_folder_both_sides_while_unbinded(self): + self._test_local_move_folder_both_sides(True) + """ + + def _test_local_move_folder_both_sides(self, unbind): + """ + NXDRIVE-647: sync when a folder is renamed locally and remotely. 
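+ Neither change is applied to the other side: the pair ends up conflicted.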
+ """ + + local = self.local_1 + remote = self.remote_document_client_1 + + # Create initial folder and file + folder = remote.make_folder("/", "Folder1") + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # First checks, everything should be online for every one + assert remote.exists("/Folder1") + assert local.exists("/Folder1") + folder_pair_state = self.engine_1.dao.get_state_from_local( + Path(self.workspace_title) / "Folder1" + ) + assert folder_pair_state is not None + folder_remote_ref = folder_pair_state.remote_ref + + # Unbind or stop engine + if unbind: + self.send_unbind_engine(1) + self.wait_unbind_engine(1) + else: + self.engine_1.stop() + + # Make changes + remote.update(folder, properties={"dc:title": "Folder1_ServerName"}) + local.rename("/Folder1", "Folder1_LocalRename") + + # Bind or start engine and wait for sync + if unbind: + self.send_bind_engine(1) + self.wait_bind_engine(1) + else: + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Check that nothing has changed + assert len(remote.get_children_info(self.workspace)) == 1 + assert remote.exists(folder) + assert remote.get_info(folder).name == "Folder1_ServerName" + assert len(local.get_children_info("/")) == 1 + assert local.exists("/Folder1_LocalRename") + + # Check folder status + folder_pair_state = self.engine_1.dao.get_normal_state_from_remote( + folder_remote_ref + ) + assert folder_pair_state.pair_state == "conflicted" + + def test_local_move_folder(self): + """ + A simple test to ensure we do not create useless URLs. + This is to handle cases when the user creates a new folder, + it has the default name set to the local system: + "New folder" + "Nouveau dossier (2)" + ... + The folder is created directly and it generates useless URLs. + So we move the document to get back good URLs. As the document has been + renamed above, the document's title is already the good one. + """ + local = self.local_1 + remote = self.remote_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + name_orig = "Nouveau dossier (42)" + name_new = "C'est le vrai nom pârdi !" 
+ + local.make_folder("/", name_orig) + self.wait_sync() + + child = remote.get_children_info(self.workspace)[0] + assert child.name == name_orig + assert child.path.endswith(name_orig) + + # Rename so that the URL becomes meaningful + local.rename(f"/{name_orig}", name_new) + self.wait_sync() + + assert remote.exists(f"/{name_new}") + child = remote.get_children_info(self.workspace)[0] + assert child.name == name_new + assert child.path.endswith(name_new) + + """ + def test_local_move_root_folder_with_unicode(self): + local = self.local_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + assert local.exists("/") + + with ensure_no_exception(): + # Rename the root folder + root_path = local.base_folder.parent + local.unlock_ref(root_path, is_abs=True) + root_path.rename(root_path.with_name("root moved, 👆!")) + + self.wait_sync() + + assert not local.exists("/") + """ diff --git a/tests/functional/test_local_paste.py b/tests/functional/test_local_paste.py new file mode 100644 index 0000000000..afcbca4ac4 --- /dev/null +++ b/tests/functional/test_local_paste.py @@ -0,0 +1,138 @@ +import shutil +import tempfile +from pathlib import Path + +from nxdrive.utils import normalized_path + +from .conftest import FILE_CONTENT, OneUserTest + +TEST_TIMEOUT = 60 + + +class TestLocalPaste(OneUserTest): + NUMBER_OF_LOCAL_FILES = 25 + TEMP_FOLDER = "temp_folder" + FOLDER_A1 = Path("a1") + FOLDER_A2 = Path("a2") + FILENAME_PATTERN = "file%03d.txt" + + def setUp(self): + """ + 1. create folder 'temp/a1' with more than 20 files in it + 2. create folder 'temp/a2', empty + 3. copy 'a1' and 'a2', in this order, to the test sync root + 4. repeat step 3, but copy 'a2' and 'a1', in this order + (to the test sync root) + 5. Verify that both folders and their content are synced to the DM, + in both steps 3 and 4 + """ + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + local = self.local_1 + assert local.exists("/") + self.workspace_abspath = local.abspath("/") + + # Create folders a1 and a2 under a temp folder + self.local_temp = normalized_path(tempfile.mkdtemp(self.TEMP_FOLDER)) + self.folder1 = self.local_temp / self.FOLDER_A1 + self.folder1.mkdir(parents=True) + self.folder2 = self.local_temp / self.FOLDER_A2 + self.folder2.mkdir(parents=True) + + # Add files in folder 'temp/a1' + for file_num in range(1, self.NUMBER_OF_LOCAL_FILES + 1): + filename = self.FILENAME_PATTERN % file_num + (self.folder1 / filename).write_bytes(FILE_CONTENT) + + def tearDown(self): + shutil.rmtree(self.local_temp) + + """ + def test_copy_paste_empty_folder_first(self): + "" + copy 'a2' to 'Nuxeo Drive Test Workspace', + then 'a1' to 'Nuxeo Drive Test Workspace' + "" + # copy 'temp/a2' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder2, self.workspace_abspath / self.FOLDER_A2) + # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder1, self.workspace_abspath / self.FOLDER_A1) + self.wait_sync(timeout=TEST_TIMEOUT) + + self._check_integrity() + """ + + def test_copy_paste_empty_folder_last(self): + """ + copy 'a1' to 'Nuxeo Drive Test Workspace', + then 'a2' to 'Nuxeo Drive Test Workspace' + """ + # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder1, self.workspace_abspath / self.FOLDER_A1) + # copy 'temp/a2' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder2, self.workspace_abspath / self.FOLDER_A2) + self.wait_sync(timeout=TEST_TIMEOUT) + + self._check_integrity() + + def _check_integrity(self): + local = self.local_1 + remote = 
self.remote_1 + num = self.NUMBER_OF_LOCAL_FILES + # check that '/Nuxeo Drive Test Workspace/a1' does exist + assert local.exists(self.FOLDER_A1) + # check that '/Nuxeo Drive Test Workspace/a2' does exist + assert local.exists(self.FOLDER_A2) + # check that '/Nuxeo Drive Test Workspace/a1' has all the files + children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) + assert len(children) == num + # check that remote (DM) 'Nuxeo Drive Test Workspace/a1' exists + remote_ref_1 = local.get_remote_id(self.FOLDER_A1) + assert remote.fs_exists(remote_ref_1) + # check that remote (DM) 'Nuxeo Drive Test Workspace/a2' exists + remote_ref_2 = local.get_remote_id(self.FOLDER_A2) + assert remote.fs_exists(remote_ref_2) + # check that remote (DM) 'Nuxeo Drive Test Workspace/a1' + # has all the files + children = [ + remote_info.name for remote_info in remote.get_fs_children(remote_ref_1) + ] + assert len(children) == num + + def test_copy_paste_same_file(self): + local = self.local_1 + remote = self.remote_1 + name = self.FILENAME_PATTERN % 1 + workspace_abspath = local.abspath("/") + path = self.FOLDER_A1 / name + copypath = self.FOLDER_A1 / f"{name}copy" + # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' + (workspace_abspath / self.FOLDER_A1).mkdir() + shutil.copy2(self.folder1 / name, workspace_abspath / path) + + self.wait_sync(timeout=TEST_TIMEOUT) + + # check that '/Nuxeo Drive Test Workspace/a1' does exist + assert local.exists(self.FOLDER_A1) + # check that '/Nuxeo Drive Test Workspace/a1' has all the files + children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) + assert len(children) == 1 + # check that remote (DM) 'Nuxeo Drive Test Workspace/a1' exists + remote_ref = local.get_remote_id(self.FOLDER_A1) + assert remote.fs_exists(remote_ref) + remote_children = [ + remote_info.name for remote_info in remote.get_fs_children(remote_ref) + ] + assert len(remote_children) == 1 + remote_id = local.get_remote_id(path) + + shutil.copy2(local.abspath(path), local.abspath(copypath)) + local.set_remote_id(copypath, remote_id) + self.wait_sync(timeout=TEST_TIMEOUT) + remote_children = [ + remote_info.name for remote_info in remote.get_fs_children(remote_ref) + ] + assert len(remote_children) == 2 + children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) + assert len(children) == 2 diff --git a/tests/functional/test_local_share_move_folders.py b/tests/functional/test_local_share_move_folders.py new file mode 100644 index 0000000000..9fb4ebf624 --- /dev/null +++ b/tests/functional/test_local_share_move_folders.py @@ -0,0 +1,121 @@ +""" +import shutil +from unittest.mock import patch + +from nxdrive.engine.watcher.constants import SECURITY_UPDATED_EVENT +from nxdrive.engine.watcher.remote_watcher import RemoteWatcher + +from ..utils import random_png +from .conftest import TwoUsersTest + + +class TestLocalShareMoveFolders(TwoUsersTest): + NUMBER_OF_LOCAL_IMAGE_FILES = 10 + + def setUp(self): + "" + 1. Create folder a1 in Nuxeo Drive Test Workspace sync root + 2. Create folder a2 in Nuxeo Drive Test Workspace sync root + 3. 
Add 10 image files in a1 + "" + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + + local = self.local_1 + # Create a1 and a2 + self.folder_path_1 = local.make_folder("/", "a1") + self.folder_path_2 = local.make_folder("/", "a2") + + num = self.NUMBER_OF_LOCAL_IMAGE_FILES + self.names = {"file%03d.png" % file_num for file_num in range(1, num + 1)} + + # Add image files to a1 + abs_folder_path_1 = local.abspath(self.folder_path_1) + for file_num in range(1, num + 1): + file_name = "file%03d.png" % file_num + file_path = abs_folder_path_1 / file_name + random_png(file_path) + + self.engine_1.start() + self.wait_sync(timeout=60, wait_win=True) + + # Check local files in a1 + self._check_local("/a1") + + # Check remote files in a1 + self._check_remote("/a1") + + def _check_local(self, folder): + local = self.local_1 + assert local.exists(folder) + + children = [child.name for child in local.get_children_info(folder)] + assert len(children) == self.NUMBER_OF_LOCAL_IMAGE_FILES + assert set(children) == self.names + + def _check_remote(self, folder): + local = self.local_1 + remote = self.remote_1 + + uid = local.get_remote_id(folder) + assert uid + assert remote.fs_exists(uid) + + children = [child.name for child in remote.get_fs_children(uid)] + assert len(children) == self.NUMBER_OF_LOCAL_IMAGE_FILES + assert set(children) == self.names + + def test_local_share_move_folder_with_files(self): + remote = self.root_remote + local = self.local_1 + + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + + input_obj = local.get_remote_id("/a1").split("#")[-1] + remote.execute( + command="Document.AddPermission", + input_obj=input_obj, + username=self.user_2, + permission="Everything", + ) + + original_get_changes = RemoteWatcher._get_changes + + def get_changes(self): + summary = original_get_changes(self) + for event in summary["fileSystemChanges"]: + if event["eventId"] == SECURITY_UPDATED_EVENT: + nonlocal src + nonlocal dst + shutil.move(src, dst) + return summary + + with patch.object(RemoteWatcher, "_get_changes", new=get_changes): + self.wait_sync() + + # Sync after move operation + self.wait_sync() + # Check that a1 doesn't exist anymore locally + assert not local.exists("/a1") + + # Check local files in a2/a1 + self._check_local("/a2/a1") + + # Check that a1 doesn't exist anymore remotely + assert len(remote.get_children_info(self.workspace)) == 1 + + # Check remote files in a2/a1 + self._check_remote("/a2/a1") + + # As Admin, create a folder inside a2/a1 + uid = local.get_remote_id("/a2/a1") + remote.make_folder(uid.split("#")[-1], "inside_a1") + + self.wait_sync() + + # Check that the new folder is synced locally + assert local.exists("/a2/a1/inside_a1") +""" diff --git a/tests/functional/test_local_storage_issue.py b/tests/functional/test_local_storage_issue.py new file mode 100644 index 0000000000..2687eeb1a6 --- /dev/null +++ b/tests/functional/test_local_storage_issue.py @@ -0,0 +1,111 @@ +import os + +from .conftest import OneUserTest + + +class TestLocalStorageIssue(OneUserTest): + def test_local_invalid_timestamp(self): + # Synchronize root workspace + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert self.local_1.exists("/") + self.engine_1.stop() + self.local_1.make_file("/", "Test.txt", content=b"plop") + os.utime(self.local_1.abspath("/Test.txt"), (0, 999_999_999_999_999)) + self.engine_1.start() + self.wait_sync() + children = self.remote_document_client_1.get_children_info(self.workspace) + 
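# The bogus modification time must not block the sync: the file is expected on the server. + 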
assert len(children) == 1 + assert children[0].name == "Test.txt" + + """ + def test_synchronize_no_space_left_on_device(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Synchronize root workspace + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + self.engine_1.stop() + + # Create a file in the remote root workspace + uid = remote.make_file("/", "test_NG.odt", content=b"Some large content.") + + # We pick a random error because there is no facility + # to parametrize a method from a class derived from + # something other than object. + errno = random.choice(list(NO_SPACE_ERRORS)) + error = OSError(errno, f"(Mock) {os.strerror(errno)}") + + # Synchronize simulating a disk space related error + bad_remote = self.get_bad_remote() + bad_remote.make_download_raise(error) + + with patch.object(self.engine_1, "remote", new=bad_remote): + self.engine_1.start() + + # By default the engine will not consider the sync completed + # because of the temporarily ignored files + self.wait_sync( + wait_for_async=True, fail_if_timeout=False, enforce_errors=False + ) + + # - temporary download file should be created locally but not moved + # - synchronization should not fail: doc pair should be temporarily ignored + # - and there should be 1 error + assert (self.engine_1.download_dir / uid).is_dir() + assert not local.exists("/test_NG.odt") + errors = self.engine_1.dao.get_errors(limit=0) + assert len(errors) == 1 + assert errors[0].remote_name == "test_NG.odt" + + assert self.engine_1.is_paused() + + # Create another file in the remote root workspace + remote.make_file("/", "test_OK.odt", content=b"Some small content.") + + # No more errors starting here + self.engine_1.resume() + self.wait_sync(wait_for_async=True, fail_if_timeout=False, enforce_errors=False) + + # Remote file should be created locally + assert local.exists("/test_OK.odt") + + # The temporarily ignored file should still be ignored as the delay + # (60 seconds by default) has not expired, and there should still be 1 error + assert not local.exists("/test_NG.odt") + errors = self.engine_1.dao.get_errors(limit=0) + assert len(errors) == 1 + assert errors[0].remote_name == "test_NG.odt" + + # Retry to synchronize the temporarily ignored file, but still simulating + # the same disk space related error + with patch.object(self.engine_1, "remote", new=bad_remote): + # Re-queue pairs in error + self.queue_manager_1.requeue_errors() + self.wait_sync(fail_if_timeout=False, enforce_errors=False) + + # - temporary download file should be created locally but not moved + # - doc pair should be temporarily ignored again + # - and there should still be 1 error + assert (self.engine_1.download_dir / uid).is_dir() + assert not local.exists("/test_NG.odt") + errors = self.engine_1.dao.get_errors(limit=0) + assert len(errors) == 1 + assert errors[0].remote_name == "test_NG.odt" + + # Synchronize without simulating any error, as if space had been made + # available on the device + self.engine_1.resume() + + # Re-queue pairs in error + self.queue_manager_1.requeue_errors() + self.wait_sync(enforce_errors=False) + + # The previously temporarily ignored file should be created locally + # and there should be no more errors left + assert not (self.engine_1.download_dir / uid).is_dir() + assert local.exists("/test_NG.odt") + assert not self.engine_1.dao.get_errors(limit=0) + """ diff --git a/tests/functional/test_long_path.py b/tests/functional/test_long_path.py new file mode 100644 index 0000000000..1c227d9317 --- /dev/null +++ 
b/tests/functional/test_long_path.py @@ -0,0 +1,101 @@ +import os +from unittest.mock import patch + +from nxdrive.constants import WINDOWS + +from .conftest import OneUserTest + +# Number of chars in path "C:\...\Nuxeo..." is approx 96 chars +FOLDER_A = "A" * 90 +FOLDER_B = "B" * 90 +FOLDER_C = "C" * 90 +FOLDER_D = "D" * 50 +FILE = "F" * 255 + ".txt" + + +class TestLongPath(OneUserTest): + def setUp(self): + self.remote_1 = self.remote_document_client_1 + self.folder_a = self.remote_1.make_folder("/", FOLDER_A) + self.folder_b = self.remote_1.make_folder(self.folder_a, FOLDER_B) + self.folder_c = self.remote_1.make_folder(self.folder_b, FOLDER_C) + self.remote_1.make_file(self.folder_c, "File1.txt", content=b"Sample Content") + + def tearDown(self): + self.remote_1.delete(self.folder_a, use_trash=False) + + def test_long_path(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + parent_path = ( + self.local_1.abspath("/") / FOLDER_A / FOLDER_B / FOLDER_C / FOLDER_D + ) + if WINDOWS: + parent_path = f"\\\\?\\{parent_path}" + os.makedirs(parent_path, exist_ok=True) + + new_file = os.path.join(parent_path, "File2.txt") + with open(new_file, "wb") as f: + f.write(b"Hello world") + + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + remote_children_of_c = self.remote_1.get_children_info(self.folder_c) + assert len(remote_children_of_c) == 2 + folder = [item for item in remote_children_of_c if item.name == FOLDER_D][0] + assert folder.name == FOLDER_D + + remote_children_of_d = self.remote_1.get_children_info(folder.uid) + assert len(remote_children_of_d) == 1 + assert remote_children_of_d[0].name == "File2.txt" + + def test_setup_on_long_path(self): + """NXDRIVE-689: Fix error when adding a new account when installation + path is greater than 245 characters. 
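+ The account binding must nevertheless succeed.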
+ """ + + self.engine_1.stop() + self.engine_1.reinit() + + # On Mac, avoid permission denied error + self.engine_1.local.clean_xattr_root() + + test_folder_len = 245 - len(str(self.local_nxdrive_folder_1)) + self.local_nxdrive_folder_1 = self.local_nxdrive_folder_1 / ( + "A" * test_folder_len + ) + assert len(str(self.local_nxdrive_folder_1)) > 245 + + self.manager_1.unbind_all() + self.engine_1 = self.manager_1.bind_server( + self.local_nxdrive_folder_1, + self.nuxeo_url, + self.user_1, + password=self.password_1, + start_engine=False, + ) + + self.engine_1.start() + self.engine_1.stop() + + +class TestLongFileName(OneUserTest): + def test_long_file_name(self): + def error(*_): + nonlocal received + received = True + + received = False + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + with patch.object( + self.manager_1.notification_service, "_longPathError", new_callable=error + ): + remote.make_file(self.workspace, FILE, content=b"Sample Content") + self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False) + + assert received + assert not self.local_1.exists(f"/{FILE}") diff --git a/tests/functional/test_mac_local_client.py b/tests/functional/test_mac_local_client.py new file mode 100644 index 0000000000..6ed13f0d8a --- /dev/null +++ b/tests/functional/test_mac_local_client.py @@ -0,0 +1,38 @@ +from ..markers import mac_only +from .conftest import OneUserTest + +try: + import xattr +except ImportError: + pass + + +@mac_only +class TestMacSpecific(OneUserTest): + def test_finder_in_use(self): + """Test that if Finder is using the file we postpone the sync.""" + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.local_1.make_file("/", "File.txt", content=b"Some Content 1") + + # Emulate the Finder in use flag + key = [0] * 32 # OSX_FINDER_INFO_ENTRY_SIZE + key[:8] = 0x62, 0x72, 0x6F, 0x6B, 0x4D, 0x41, 0x43, 0x53 + + xattr.setxattr( + str(self.local_1.abspath("/File.txt")), + xattr.XATTR_FINDERINFO_NAME, + bytes(bytearray(key)), + ) + + # The file should not be synced and there have no remote id + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + assert not self.local_1.get_remote_id("/File.txt") + + # Remove the Finder flag + self.local_1.remove_remote_id("/File.txt", name=xattr.XATTR_FINDERINFO_NAME) + + # The sync process should now handle the file and sync it + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + assert self.local_1.get_remote_id("/File.txt") diff --git a/tests/functional/test_multiple_files.py b/tests/functional/test_multiple_files.py new file mode 100644 index 0000000000..d2db89c5f1 --- /dev/null +++ b/tests/functional/test_multiple_files.py @@ -0,0 +1,135 @@ +""" +import shutil +from pathlib import Path + +import pytest + +from nxdrive.constants import LINUX, MAC + +from ..markers import not_linux +from .conftest import OneUserTest + + +class TestMultipleFiles(OneUserTest): + NUMBER_OF_LOCAL_FILES = 10 + SYNC_TIMEOUT = 10 # in seconds + + def setUp(self): + "" + 1. create folder 'Nuxeo Drive Test Workspace/a1' with 100 files in it + 2. create folder 'Nuxeo Drive Test Workspace/a2' + 2. 
create folder 'Nuxeo Drive Test Workspace/a3' + "" + + self.engine_1.start() + self.wait_sync() + local = self.local_1 + + # Create folder a1 + self.folder_path_1 = local.make_folder("/", "a1") + + # Add 10 files in folder 'Nuxeo Drive Test Workspace/a1' + for file_num in range(1, self.NUMBER_OF_LOCAL_FILES + 1): + local.make_file( + self.folder_path_1, "local%04d.txt" % file_num, content=b"content" + ) + + # Create folder a2 + self.folder_path_2 = local.make_folder("/", "a2") + self.folder_path_3 = Path("a3") + self.wait_sync(wait_for_async=True, timeout=self.SYNC_TIMEOUT) + + def test_move_and_copy_paste_folder_original_location_from_child_stopped(self): + self._move_and_copy_paste_folder_original_location_from_child() + + def test_move_and_copy_paste_folder_original_location_from_child(self): + self._move_and_copy_paste_folder_original_location_from_child(False) + + def _move_and_copy_paste_folder_original_location_from_child(self, stopped=True): + local = self.local_1 + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + shutil.move(src, dst) + self.wait_sync(timeout=self.SYNC_TIMEOUT) + self._move_and_copy_paste_folder( + Path("a2/a1"), Path(""), Path("a2"), stopped=stopped + ) + + def _move_and_copy_paste_folder( + self, folder_1: Path, folder_2: Path, target_folder: Path, stopped=True + ): + "" + /folder_1 + /folder_2 + /target_folder + Will + move /folder_1 inside /folder_2/ as /folder_2/folder_1 + copy /folder_2/folder_1 into /target_folder/ + "" + if stopped: + self.engine_1.stop() + remote = self.remote_1 + local = self.local_1 + src = local.abspath(folder_1) + dst = local.abspath(folder_2) + new_path = folder_2 / folder_1.name + copy_path = target_folder / folder_1.name + shutil.move(src, dst) + # check that 'Nuxeo Drive Test Workspace/a1' does not exist anymore + assert not local.exists(folder_1) + # check that 'Nuxeo Drive Test Workspace/a2/a1' now exists + assert local.exists(new_path) + # copy the 'Nuxeo Drive Test Workspace/a2/a1' tree + # back under 'Nuxeo Drive Test Workspace' + shutil.copytree(local.abspath(new_path), local.abspath(copy_path)) + if stopped: + self.engine_1.start() + self.wait_sync(timeout=self.SYNC_TIMEOUT) + + # asserts + # expect both *new_path* and *copy_path* to contain all the files + num = self.NUMBER_OF_LOCAL_FILES + names = {"local%04d.txt" % n for n in range(1, num + 1)} + + for path in (new_path, copy_path): + # Local + assert local.abspath(path).exists() + children = [f.name for f in local.abspath(path).iterdir()] + + assert len(children) == num + assert set(children) == names + + # Remote + uid = local.get_remote_id(path) + assert uid + + children = remote.get_fs_children(uid) + assert len(children) == num + children_names = {child.name for child in children} + assert children_names == names + + @pytest.mark.randombug("NXDRIVE-720", condition=LINUX) + @pytest.mark.randombug("NXDRIVE-813", condition=MAC) + def test_move_and_copy_paste_folder_original_location(self): + self._move_and_copy_paste_folder( + self.folder_path_1, + self.folder_path_2, + self.folder_path_1.parent, + stopped=False, + ) + + @not_linux( + reason="NXDRIVE-471: Not handled under GNU/Linux as " + "creation time is not stored" + ) + def test_move_and_copy_paste_folder_original_location_stopped(self): + self._move_and_copy_paste_folder( + self.folder_path_1, self.folder_path_2, self.folder_path_1.parent + ) + + def test_move_and_copy_paste_folder_new_location(self): + self._move_and_copy_paste_folder( 
self.folder_path_1, self.folder_path_2, self.folder_path_3 + ) +"""