PBM-CUSTOM #48

GitHub Actions / JUnit Test Report failed May 6, 2025 in 0s

60 tests run, 53 passed, 4 skipped, 3 failed.

Annotations

Check failure on line 72 in pbm-functional/pytest/test_PBM-1439.py

test_PBM-1439.test_logical_PBM_T294

AssertionError: Errors found in PBM backup logs
2025-05-06T12:02:51Z E [rs1/rs101:27017] [backup/2025-05-06T12:02:40Z] failed to upload oplog: write data: upload to S3: upload multipart failed, upload id: ZGQxNWQ2MWItMGI1ZC00NGI2LThjNmYtMjg5MGFjZWY4ODU5LmJjZTkwNThiLTNkYWItNDcwMi1hNjJkLWI1ZTg1OTg5ODhiYXgxNzQ2NTMyOTcxMTQzNjQ5NjU4, cause: exceeded total allowed configured MaxUploadParts (5). Adjust PartSize to fit in this limit.
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f5c25195e50>

    @pytest.mark.timeout(300, func_only=True)
    def test_logical_PBM_T294(start_cluster, cluster):
        """
        Test case for to check multipart upload of backup files with custom maxUploadParts option.
        It checks that file is divided with proper part size allowing file to grow during backup.
        """
        cluster.exec_pbm_cli("config --set storage.s3.maxUploadParts=5")
        client = pymongo.MongoClient(cluster.connection)
        collection = client["test"]["bigdata"]
        doc_size_kb = 10
        total_size_mb = 1000
        total_docs = (total_size_mb * 1024) // doc_size_kb
        batch_size = 1000
        docs_before = total_docs // 3
        docs_during = total_docs - docs_before
        def insert_random_data(start_id, count):
            Cluster.log(f"Inserting {count} documents")
            for i in range(start_id, start_id + count, batch_size):
                batch = []
                for j in range(min(batch_size, (start_id + count) - i)):
                    random_data = os.urandom(doc_size_kb * 1024).hex()
                    batch.append({"_id": i + j, "payload": random_data})
                collection.insert_many(batch)
                time.sleep(0.1)
            Cluster.log(f"Inserted {count} documents")
        insert_random_data(start_id=0, count=docs_before)
        insert_thread = threading.Thread(target=insert_random_data, args=(docs_before, docs_during))
        insert_thread.start()
        Cluster.log("Starting backup")
        result = cluster.exec_pbm_cli("backup --wait")
        insert_thread.join()
        assert result.rc == 0, f"PBM backup failed\nstderr:\n{result.stderr}"
        logs = cluster.exec_pbm_cli("logs -sD -t0 -e backup")
        error_lines = [line for line in logs.stdout.splitlines() if " E " in line]
        if error_lines:
            error_summary = "\n".join(error_lines)
>           raise AssertionError(f"Errors found in PBM backup logs\n{error_summary}")
E           AssertionError: Errors found in PBM backup logs
E           2025-05-06T12:02:51Z E [rs1/rs101:27017] [backup/2025-05-06T12:02:40Z] failed to upload oplog: write data: upload to S3: upload multipart failed, upload id: ZGQxNWQ2MWItMGI1ZC00NGI2LThjNmYtMjg5MGFjZWY4ODU5LmJjZTkwNThiLTNkYWItNDcwMi1hNjJkLWI1ZTg1OTg5ODhiYXgxNzQ2NTMyOTcxMTQzNjQ5NjU4, cause: exceeded total allowed configured MaxUploadParts (5). Adjust PartSize to fit in this limit.

test_PBM-1439.py:72: AssertionError
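
Both failed uploads in this report trip the same limit: with storage.s3.maxUploadParts=5, the S3 multipart uploader runs out of parts once the uploaded file outgrows partSize * 5, which is exactly what the oplog does while the insert thread keeps writing. The "Adjust PartSize" hint boils down to simple arithmetic; a minimal sketch follows (the 10 MiB floor and the helper name are illustrative assumptions, not PBM internals):

    import math

    def min_part_size(expected_bytes: int, max_parts: int,
                      floor: int = 10 * 1024 * 1024) -> int:
        # Generic S3 multipart rule: part_count = ceil(size / part_size),
        # so the smallest part size that fits is ceil(size / max_parts).
        # Assumption: PBM's own sizing logic may differ in detail.
        return max(floor, math.ceil(expected_bytes / max_parts))

    # ~1000 MB of inserted data plus 50% headroom for growth during backup
    expected = int(1000 * 1024 * 1024 * 1.5)
    print(min_part_size(expected, max_parts=5))  # 314572800, i.e. ~300 MiB per part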

Check failure on line 107 in pbm-functional/pytest/test_PBM-1439.py

test_PBM-1439.test_logical_PBM_T295

AssertionError: Errors found in PBM backup logs
2025-05-06T12:03:28Z E [rs1/rs101:27017] [backup/2025-05-06T12:03:27Z] unable to proceed with the backup, active lock is present
2025-05-06T12:03:31Z E [rs1/rs101:27017] [backup/2025-05-06T12:03:27Z] failed to upload oplog: write data: upload to S3: upload multipart failed, upload id: ZGQxNWQ2MWItMGI1ZC00NGI2LThjNmYtMjg5MGFjZWY4ODU5LmU0NWVkYzkzLTQyN2UtNDZhOC1iOTZkLWE2MjNkZDc3YjMxMngxNzQ2NTMzMDExMzAwNjY0MTg5, cause: exceeded total allowed configured MaxUploadParts (5). Adjust PartSize to fit in this limit.
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f5c25195e50>

    @pytest.mark.timeout(300, func_only=True)
    def test_logical_PBM_T295(start_cluster, cluster):
        """
        Test case for to check multipart upload of oplog with custom maxUploadParts option
        """
        cluster.exec_pbm_cli("config --set storage.s3.maxUploadParts=5")
        client = pymongo.MongoClient(cluster.connection)
        collection = client["test"]["bigdata"]
        doc_size_kb = 10
        total_size_mb = 1000
        total_docs = (total_size_mb * 1024) // doc_size_kb
        batch_size = 1000
        def insert_random_data(count):
            Cluster.log(f"Inserting {count} documents")
            for i in range(0, count, batch_size):
                batch = []
                for j in range(min(batch_size, count - i)):
                    random_data = os.urandom(doc_size_kb * 1024).hex()
                    batch.append({"_id": i + j, "payload": random_data})
                collection.insert_many(batch)
                time.sleep(0.1)
            Cluster.log(f"Inserted {count} documents")
        insert_thread = threading.Thread(target=insert_random_data, args=(total_docs,))
        insert_thread.start()
        Cluster.log("Starting backup")
        result = cluster.exec_pbm_cli("backup --ns=random.* --wait")
        insert_thread.join()
        assert result.rc == 0, f"PBM backup failed\nstderr:\n{result.stderr}"
        logs = cluster.exec_pbm_cli("logs -sD -t0 -e backup")
        error_lines = [line for line in logs.stdout.splitlines() if " E " in line]
        if error_lines:
            error_summary = "\n".join(error_lines)
>           raise AssertionError(f"Errors found in PBM backup logs\n{error_summary}")
E           AssertionError: Errors found in PBM backup logs
E           2025-05-06T12:03:28Z E [rs1/rs101:27017] [backup/2025-05-06T12:03:27Z] unable to proceed with the backup, active lock is present
E           2025-05-06T12:03:31Z E [rs1/rs101:27017] [backup/2025-05-06T12:03:27Z] failed to upload oplog: write data: upload to S3: upload multipart failed, upload id: ZGQxNWQ2MWItMGI1ZC00NGI2LThjNmYtMjg5MGFjZWY4ODU5LmU0NWVkYzkzLTQyN2UtNDZhOC1iOTZkLWE2MjNkZDc3YjMxMngxNzQ2NTMzMDExMzAwNjY0MTg5, cause: exceeded total allowed configured MaxUploadParts (5). Adjust PartSize to fit in this limit.

test_PBM-1439.py:107: AssertionError
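
This run logs two distinct errors: the same MaxUploadParts overflow as T294, plus "active lock is present", meaning the backup started while a previous operation still held PBM's lock. A pre-flight wait would rule the stale-lock error out; the sketch below reuses the suite's exec_pbm_cli helper and assumes `pbm status` prints "(none)" under "Currently running" when idle (both assumptions, not a documented API):

    import time

    def wait_until_idle(cluster, timeout: float = 60.0, poll: float = 2.0) -> None:
        # Assumption: idle `pbm status` output contains "(none)" in its
        # "Currently running" section; adjust the check for your PBM version.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if "(none)" in cluster.exec_pbm_cli("status").stdout:
                return
            time.sleep(poll)
        raise TimeoutError(f"PBM still busy after {timeout}s; lock not released")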

Check failure on line 55 in pbm-functional/pytest/test_PBM-1487.py

test_PBM-1487.test_logical_PBM_T297

Failed: Timeout (>600.0s) from pytest-timeout.
Raw output
start_cluster = None, cluster = <cluster.Cluster object at 0x7f5c2552b450>

    @pytest.mark.timeout(600, func_only=True)
    def test_logical_PBM_T297(start_cluster, cluster):
        client = pymongo.MongoClient(cluster.connection)
        client.admin.command({"enableSharding": "testDB", "primaryShard": "rs1"})
        client.admin.command({"shardCollection": "testDB.test", "key": {"_id": 1}})
        client['testDB']['test'].insert_one({})
    
        backup = cluster.make_backup('logical')
    
        client.drop_database('testDB')
        # it's important to recreate db with the same primary shard
        client.admin.command({"enableSharding": "testDB", "primaryShard": "rs1"})
        client.admin.command({"shardCollection": "testDB.test", "key": {"_id": 1}})
    
        cluster.make_restore(backup)
        try:
>           count = client["testDB"]["test"].count_documents({})

test_PBM-1487.py:55: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/collection.py:2154: in count_documents
    return self._retryable_non_cursor_read(_cmd, session, _Op.COUNT)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/collection.py:2168: in _retryable_non_cursor_read
    return client._retryable_read(func, self._read_preference_for(s), s, operation)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/mongo_client.py:2026: in _retryable_read
    return self._retry_internal(
/usr/local/lib/python3.11/site-packages/pymongo/_csot.py:119: in csot_wrapper
    return func(self, *args, **kwargs)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/mongo_client.py:1993: in _retry_internal
    ).run()
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/mongo_client.py:2730: in run
    return self._read() if self._is_read else self._write()
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/mongo_client.py:2891: in _read
    return self._func(self._session, self._server, conn, read_pref)  # type: ignore
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/collection.py:2149: in _cmd
    result = self._aggregate_one_result(conn, read_preference, cmd, collation, session)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/collection.py:2007: in _aggregate_one_result
    result = self._command(
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/collection.py:621: in _command
    return conn.command(
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/helpers.py:47: in inner
    return func(*args, **kwargs)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/pool.py:439: in command
    self._raise_connection_failure(error)
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/pool.py:411: in command
    return command(
/usr/local/lib/python3.11/site-packages/pymongo/synchronous/network.py:198: in command
    reply = receive_message(conn, request_id)
/usr/local/lib/python3.11/site-packages/pymongo/network_layer.py:751: in receive_message
    length, _, response_to, op_code = _UNPACK_HEADER(receive_data(conn, 16, deadline))
/usr/local/lib/python3.11/site-packages/pymongo/network_layer.py:351: in receive_data
    chunk_length = conn.conn.recv_into(mv[bytes_read:])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <pymongo.network_layer.NetworkingInterface object at 0x7f5c249af0d0>
buffer = <memory at 0x7f5c25127580>

    def recv_into(self, buffer: bytes) -> int:
>       return self.conn.recv_into(buffer)
E       Failed: Timeout (>600.0s) from pytest-timeout.

/usr/local/lib/python3.11/site-packages/pymongo/network_layer.py:461: Failed
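
The third failure is different in kind: the post-restore count_documents call blocks inside recv_into until pytest-timeout kills the test at 600 s, so the report shows a timeout rather than the underlying hang. A hedged way to surface the real error faster is to bound the read client-side; this sketch assumes pymongo >= 4.2 (for timeoutMS) and that cluster.connection is the URI the test already uses:

    import pymongo
    from pymongo.errors import PyMongoError

    # timeoutMS caps every operation on this client, so a hung mongos becomes
    # a prompt, explicit driver error instead of a 600 s pytest-timeout kill.
    client = pymongo.MongoClient(cluster.connection, timeoutMS=60_000)
    try:
        count = client["testDB"]["test"].count_documents({})
    except PyMongoError as exc:
        raise AssertionError(f"post-restore read failed or timed out: {exc}") from exc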