[PBM-1171] support mixed env in pre-check validation #484
This check has been archived and is scheduled for deletion.
Learn more about checks retention.
GitHub Actions / JUnit Test Report
failed
Sep 14, 2023 in 0s
14 tests run, 13 passed, 0 skipped, 1 failed.
Annotations
Check failure on line 77 in psmdb-testing/pbm-functional/pytest/test_fresh_sharded.py
github-actions / JUnit Test Report
test_fresh_sharded.test_logical
AssertionError: Starting restore 2023-09-14T15:46:56.565907282Z from '2023-09-14T15:46:31Z'...Started logical restore.
Waiting to finish....Error: operation failed with: reply oplog: replay chunk 1694706390.1694706396: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1694706390,"I":21},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.changelog","Object":[{"Key":"_id","Value":"rs201:27017-2023-09-14T15:46:30.487+00:00-65032ad632d0d29f9599f874"},{"Key":"server","Value":"rs201:27017"},{"Key":"shard","Value":"rs2"},{"Key":"clientAddr","Value":"172.18.0.15:38834"},{"Key":"time","Value":"2023-09-14T15:46:30.487Z"},{"Key":"what","Value":"shardCollection.end"},{"Key":"ns","Value":"test.test"},{"Key":"details","Value":[{"Key":"version","Value":"1|3||65032ad632d0d29f9599f83e"},{"Key":"numChunks","Value":4}]}],"Query":null,"UI":{"Subtype":4,"Data":"rpnfiBnsT0Ok6jcru1iGBw=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (ae99df88-19ec-4f43-a4ea-372bbb588607): { ts: Timestamp(1694706390, 21), t: 1, h: null, v: 2, op: "i", ns: "config.changelog", o: { _id: "rs201:27017-2023-09-14T15:46:30.487+00:00-65032ad632d0d29f9599f874", server: "rs201:27017", shard: "rs2", clientAddr: "172.18.0.15:38834", time: new Date(1694706390487), what: "shardCollection.end", ns: "test.test", details: { version: "1|3||65032ad632d0d29f9599f83e", numChunks: 4 } }, ui: UUID("ae99df88-19ec-4f43-a4ea-372bbb588607"), wall: new Date(0) }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fb8b8024990>
newcluster = <cluster.Cluster object at 0x7fb8b8026e50>
@pytest.mark.testcase(test_case_key="T208", test_step_key=1)
@pytest.mark.timeout(300,func_only=True)
def test_logical(start_cluster,cluster,newcluster):
cluster.check_pbm_status()
pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
backup=cluster.make_backup("logical")
cluster.destroy()
newcluster.make_resync()
> newcluster.make_restore(backup,check_pbm_status=True)
test_fresh_sharded.py:77:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <cluster.Cluster object at 0x7fb8b8026e50>, name = '2023-09-14T15:46:31Z'
kwargs = {'check_pbm_status': True}
client = MongoClient(host=['newmongos:27017'], document_class=dict, tz_aware=False, connect=True)
result = CommandResult(command=b'timeout 180 pbm restore 2023-09-14T15:46:31Z --wait', exit_status=1, stdout=b'Starting restore...d0d29f9599f83e", numChunks: 4 } }, ui: UUID("ae99df88-19ec-4f43-a4ea-372bbb588607"), wall: new Date(0) }\n', stderr='')
n = <testinfra.host.Host docker://newrscfg01>, timeout = 1694706476.272486
def make_restore(self, name, **kwargs):
if self.layout == "sharded":
client = pymongo.MongoClient(self.connection)
result = client.admin.command("balancerStop")
client.close()
Cluster.log("Stopping balancer: " + str(result))
self.stop_mongos()
self.stop_arbiters()
n = testinfra.get_host("docker://" + self.pbm_cli)
timeout = time.time() + 60
while True:
if not self.get_status()['running']:
break
if time.time() > timeout:
assert False, "Cannot start restore, another operation running"
time.sleep(1)
Cluster.log("Restore started")
result = n.run('timeout 180 pbm restore ' + name + ' --wait')
if result.rc == 124:
# try to catch possible failures if timeout exceeded
for host in self.mongod_hosts:
try:
container = docker.from_env().containers.get(host)
get_logs = container.exec_run(
'cat /var/lib/mongo/pbm.restore.log', stderr=False)
if get_logs.exit_code == 0:
Cluster.log(
"!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
Cluster.log(get_logs.output.decode('utf-8'))
except docker.errors.APIError:
pass
assert False, "Timeout for restore exceeded"
elif result.rc == 0:
Cluster.log(result.stdout)
else:
> assert False, result.stdout + result.stderr
E AssertionError: Starting restore 2023-09-14T15:46:56.565907282Z from '2023-09-14T15:46:31Z'...Started logical restore.
E Waiting to finish....Error: operation failed with: reply oplog: replay chunk 1694706390.1694706396: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1694706390,"I":21},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.changelog","Object":[{"Key":"_id","Value":"rs201:27017-2023-09-14T15:46:30.487+00:00-65032ad632d0d29f9599f874"},{"Key":"server","Value":"rs201:27017"},{"Key":"shard","Value":"rs2"},{"Key":"clientAddr","Value":"172.18.0.15:38834"},{"Key":"time","Value":"2023-09-14T15:46:30.487Z"},{"Key":"what","Value":"shardCollection.end"},{"Key":"ns","Value":"test.test"},{"Key":"details","Value":[{"Key":"version","Value":"1|3||65032ad632d0d29f9599f83e"},{"Key":"numChunks","Value":4}]}],"Query":null,"UI":{"Subtype":4,"Data":"rpnfiBnsT0Ok6jcru1iGBw=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (ae99df88-19ec-4f43-a4ea-372bbb588607): { ts: Timestamp(1694706390, 21), t: 1, h: null, v: 2, op: "i", ns: "config.changelog", o: { _id: "rs201:27017-2023-09-14T15:46:30.487+00:00-65032ad632d0d29f9599f874", server: "rs201:27017", shard: "rs2", clientAddr: "172.18.0.15:38834", time: new Date(1694706390487), what: "shardCollection.end", ns: "test.test", details: { version: "1|3||65032ad632d0d29f9599f83e", numChunks: 4 } }, ui: UUID("ae99df88-19ec-4f43-a4ea-372bbb588607"), wall: new Date(0) }
cluster.py:450: AssertionError
Loading