diff --git a/.flake8 b/.flake8
index 2e4387498..87f6e408c 100644
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 1b3cb6c52..eb4d9f794 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -13,5 +13,5 @@
# limitations under the License.
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b
-# created: 2023-06-27T13:04:21.96690344Z
+ digest: sha256:bacc3af03bff793a03add584537b36b5644342931ad989e3ba1171d3bd5399f5
+# created: 2023-11-23T18:17:28.105124211Z
diff --git a/.github/auto-label.yaml b/.github/auto-label.yaml
index 41bff0b53..b2016d119 100644
--- a/.github/auto-label.yaml
+++ b/.github/auto-label.yaml
@@ -1,4 +1,4 @@
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index e97d89e48..221806ced 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -28,7 +28,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v4
with:
- python-version: "3.9"
+ python-version: "3.10"
- name: Install nox
run: |
python -m pip install --upgrade setuptools pip wheel
diff --git a/.github/workflows/system_emulated.yml b/.github/workflows/system_emulated.yml
index e1f43fd40..f1aa7e87c 100644
--- a/.github/workflows/system_emulated.yml
+++ b/.github/workflows/system_emulated.yml
@@ -7,7 +7,7 @@ on:
jobs:
run-systests:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
@@ -20,7 +20,7 @@ jobs:
python-version: '3.8'
- name: Setup GCloud SDK
- uses: google-github-actions/setup-gcloud@v1.1.0
+ uses: google-github-actions/setup-gcloud@v1.1.1
- name: Install / run Nox
run: |
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index 8057a7691..a32027b49 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -8,7 +8,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python: ['3.7', '3.8', '3.9', '3.10', '3.11']
+ python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
steps:
- name: Checkout
uses: actions/checkout@v3
diff --git a/.gitignore b/.gitignore
index b4243ced7..d083ea1dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,7 @@ docs.metadata
# Virtual environment
env/
+venv/
# Test logs
coverage.xml
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index 2ab1155b2..dec6b66a7 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2018 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile
index f8137d0ae..8e39a2cc4 100644
--- a/.kokoro/docker/docs/Dockerfile
+++ b/.kokoro/docker/docs/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh
index f52514257..6f3972140 100755
--- a/.kokoro/populate-secrets.sh
+++ b/.kokoro/populate-secrets.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC.
+# Copyright 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh
index 1c4d62370..9eafe0be3 100755
--- a/.kokoro/publish-docs.sh
+++ b/.kokoro/publish-docs.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/release.sh b/.kokoro/release.sh
index 6b594c813..2e1cbfa81 100755
--- a/.kokoro/release.sh
+++ b/.kokoro/release.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
index 8477e4ca6..2a8fd970c 100644
--- a/.kokoro/release/common.cfg
+++ b/.kokoro/release/common.cfg
@@ -38,3 +38,12 @@ env_vars: {
key: "SECRET_MANAGER_KEYS"
value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
}
+
+# Store the packages we uploaded to PyPI. That way, we have a record of exactly
+# what we published, which we can use to generate SBOMs and attestations.
+action {
+ define_artifacts {
+ regex: "github/python-bigtable/**/*.tar.gz"
+ strip_prefix: "github/python-bigtable"
+ }
+}
diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt
index c7929db6d..8957e2110 100644
--- a/.kokoro/requirements.txt
+++ b/.kokoro/requirements.txt
@@ -4,91 +4,75 @@
#
# pip-compile --allow-unsafe --generate-hashes requirements.in
#
-argcomplete==2.0.0 \
- --hash=sha256:6372ad78c89d662035101418ae253668445b391755cfe94ea52f1b9d22425b20 \
- --hash=sha256:cffa11ea77999bb0dd27bb25ff6dc142a6796142f68d45b1a26b11f58724561e
+argcomplete==3.1.4 \
+ --hash=sha256:72558ba729e4c468572609817226fb0a6e7e9a0a7d477b882be168c0b4a62b94 \
+ --hash=sha256:fbe56f8cda08aa9a04b307d8482ea703e96a6a801611acb4be9bf3942017989f
# via nox
-attrs==22.1.0 \
- --hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \
- --hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c
+attrs==23.1.0 \
+ --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \
+ --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015
# via gcp-releasetool
-bleach==5.0.1 \
- --hash=sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a \
- --hash=sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c
- # via readme-renderer
-cachetools==5.2.0 \
- --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
- --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
+cachetools==5.3.2 \
+ --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \
+ --hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1
# via google-auth
-certifi==2022.12.7 \
- --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
- --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
+certifi==2023.7.22 \
+ --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \
+ --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9
# via requests
-cffi==1.15.1 \
- --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \
- --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \
- --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \
- --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \
- --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \
- --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \
- --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \
- --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \
- --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \
- --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \
- --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \
- --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \
- --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \
- --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \
- --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \
- --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \
- --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \
- --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \
- --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \
- --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \
- --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \
- --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \
- --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \
- --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \
- --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \
- --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \
- --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \
- --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \
- --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \
- --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \
- --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \
- --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \
- --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \
- --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \
- --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \
- --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \
- --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \
- --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \
- --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \
- --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \
- --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \
- --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \
- --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \
- --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \
- --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \
- --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \
- --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \
- --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \
- --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \
- --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \
- --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \
- --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \
- --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \
- --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \
- --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \
- --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \
- --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \
- --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \
- --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \
- --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \
- --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \
- --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \
- --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \
- --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0
+cffi==1.16.0 \
+ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \
+ --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \
+ --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \
+ --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \
+ --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \
+ --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \
+ --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \
+ --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \
+ --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \
+ --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \
+ --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \
+ --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \
+ --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \
+ --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \
+ --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \
+ --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \
+ --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \
+ --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \
+ --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \
+ --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \
+ --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \
+ --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \
+ --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \
+ --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \
+ --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \
+ --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \
+ --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \
+ --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \
+ --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \
+ --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \
+ --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \
+ --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \
+ --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \
+ --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \
+ --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \
+ --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \
+ --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \
+ --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \
+ --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \
+ --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \
+ --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \
+ --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \
+ --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \
+ --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \
+ --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \
+ --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \
+ --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \
+ --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \
+ --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \
+ --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \
+ --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \
+ --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357
# via cryptography
charset-normalizer==2.1.1 \
--hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \
@@ -109,74 +93,74 @@ colorlog==6.7.0 \
# via
# gcp-docuploader
# nox
-commonmark==0.9.1 \
- --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
- --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9
- # via rich
-cryptography==41.0.0 \
- --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \
- --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \
- --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \
- --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \
- --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \
- --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \
- --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \
- --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \
- --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \
- --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \
- --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \
- --hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \
- --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \
- --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \
- --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \
- --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \
- --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \
- --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \
- --hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be
+cryptography==41.0.5 \
+ --hash=sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf \
+ --hash=sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84 \
+ --hash=sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e \
+ --hash=sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8 \
+ --hash=sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7 \
+ --hash=sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1 \
+ --hash=sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88 \
+ --hash=sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86 \
+ --hash=sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179 \
+ --hash=sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81 \
+ --hash=sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20 \
+ --hash=sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548 \
+ --hash=sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d \
+ --hash=sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d \
+ --hash=sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5 \
+ --hash=sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1 \
+ --hash=sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147 \
+ --hash=sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936 \
+ --hash=sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797 \
+ --hash=sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696 \
+ --hash=sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72 \
+ --hash=sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da \
+ --hash=sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723
# via
# gcp-releasetool
# secretstorage
-distlib==0.3.6 \
- --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \
- --hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e
+distlib==0.3.7 \
+ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \
+ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8
# via virtualenv
-docutils==0.19 \
- --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \
- --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc
+docutils==0.20.1 \
+ --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \
+ --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b
# via readme-renderer
-filelock==3.8.0 \
- --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \
- --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4
+filelock==3.13.1 \
+ --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \
+ --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c
# via virtualenv
-gcp-docuploader==0.6.4 \
- --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \
- --hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf
+gcp-docuploader==0.6.5 \
+ --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \
+ --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea
# via -r requirements.in
-gcp-releasetool==1.10.5 \
- --hash=sha256:174b7b102d704b254f2a26a3eda2c684fd3543320ec239baf771542a2e58e109 \
- --hash=sha256:e29d29927fe2ca493105a82958c6873bb2b90d503acac56be2c229e74de0eec9
+gcp-releasetool==1.16.0 \
+ --hash=sha256:27bf19d2e87aaa884096ff941aa3c592c482be3d6a2bfe6f06afafa6af2353e3 \
+ --hash=sha256:a316b197a543fd036209d0caba7a8eb4d236d8e65381c80cbc6d7efaa7606d63
# via -r requirements.in
-google-api-core==2.10.2 \
- --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \
- --hash=sha256:34f24bd1d5f72a8c4519773d99ca6bf080a6c4e041b4e9f024fe230191dda62e
+google-api-core==2.12.0 \
+ --hash=sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553 \
+ --hash=sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160
# via
# google-cloud-core
# google-cloud-storage
-google-auth==2.14.1 \
- --hash=sha256:ccaa901f31ad5cbb562615eb8b664b3dd0bf5404a67618e642307f00613eda4d \
- --hash=sha256:f5d8701633bebc12e0deea4df8abd8aff31c28b355360597f7f2ee60f2e4d016
+google-auth==2.23.4 \
+ --hash=sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3 \
+ --hash=sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2
# via
# gcp-releasetool
# google-api-core
# google-cloud-core
# google-cloud-storage
-google-cloud-core==2.3.2 \
- --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \
- --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a
+google-cloud-core==2.3.3 \
+ --hash=sha256:37b80273c8d7eee1ae816b3a20ae43585ea50506cb0e60f3cf5be5f87f1373cb \
+ --hash=sha256:fbd11cad3e98a7e5b0343dc07cb1039a5ffd7a5bb96e1f1e27cee4bda4a90863
# via google-cloud-storage
-google-cloud-storage==2.6.0 \
- --hash=sha256:104ca28ae61243b637f2f01455cc8a05e8f15a2a18ced96cb587241cdd3820f5 \
- --hash=sha256:4ad0415ff61abdd8bb2ae81c1f8f7ec7d91a1011613f2db87c614c550f97bfe9
+google-cloud-storage==2.13.0 \
+ --hash=sha256:ab0bf2e1780a1b74cf17fccb13788070b729f50c252f0c94ada2aae0ca95437d \
+ --hash=sha256:f62dc4c7b6cd4360d072e3deb28035fbdad491ac3d9b0b1815a12daea10f37c7
# via gcp-docuploader
google-crc32c==1.5.0 \
--hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \
@@ -247,29 +231,31 @@ google-crc32c==1.5.0 \
--hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \
--hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \
--hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4
- # via google-resumable-media
-google-resumable-media==2.4.0 \
- --hash=sha256:2aa004c16d295c8f6c33b2b4788ba59d366677c0a25ae7382436cb30f776deaa \
- --hash=sha256:8d5518502f92b9ecc84ac46779bd4f09694ecb3ba38a3e7ca737a86d15cbca1f
+ # via
+ # google-cloud-storage
+ # google-resumable-media
+google-resumable-media==2.6.0 \
+ --hash=sha256:972852f6c65f933e15a4a210c2b96930763b47197cdf4aa5f5bea435efb626e7 \
+ --hash=sha256:fc03d344381970f79eebb632a3c18bb1828593a2dc5572b5f90115ef7d11e81b
# via google-cloud-storage
-googleapis-common-protos==1.57.0 \
- --hash=sha256:27a849d6205838fb6cc3c1c21cb9800707a661bb21c6ce7fb13e99eb1f8a0c46 \
- --hash=sha256:a9f4a1d7f6d9809657b7f1316a1aa527f6664891531bcfcc13b6696e685f443c
+googleapis-common-protos==1.61.0 \
+ --hash=sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0 \
+ --hash=sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b
# via google-api-core
idna==3.4 \
--hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
--hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
# via requests
-importlib-metadata==5.0.0 \
- --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
- --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43
+importlib-metadata==6.8.0 \
+ --hash=sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb \
+ --hash=sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743
# via
# -r requirements.in
# keyring
# twine
-jaraco-classes==3.2.3 \
- --hash=sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158 \
- --hash=sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a
+jaraco-classes==3.3.0 \
+ --hash=sha256:10afa92b6743f25c0cf5f37c6bb6e18e2c5bb84a16527ccfc0040ea377e7aaeb \
+ --hash=sha256:c063dd08e89217cee02c8d5e5ec560f2c8ce6cdc2fcdc2e68f7b2e5547ed3621
# via keyring
jeepney==0.8.0 \
--hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \
@@ -281,75 +267,121 @@ jinja2==3.1.2 \
--hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
--hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
# via gcp-releasetool
-keyring==23.11.0 \
- --hash=sha256:3dd30011d555f1345dec2c262f0153f2f0ca6bca041fb1dc4588349bb4c0ac1e \
- --hash=sha256:ad192263e2cdd5f12875dedc2da13534359a7e760e77f8d04b50968a821c2361
+keyring==24.2.0 \
+ --hash=sha256:4901caaf597bfd3bbd78c9a0c7c4c29fcd8310dab2cffefe749e916b6527acd6 \
+ --hash=sha256:ca0746a19ec421219f4d713f848fa297a661a8a8c1504867e55bfb5e09091509
# via
# gcp-releasetool
# twine
-markupsafe==2.1.1 \
- --hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \
- --hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \
- --hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \
- --hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \
- --hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \
- --hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \
- --hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \
- --hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \
- --hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \
- --hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \
- --hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \
- --hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \
- --hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \
- --hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \
- --hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \
- --hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \
- --hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \
- --hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b \
- --hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \
- --hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \
- --hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \
- --hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \
- --hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \
- --hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \
- --hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \
- --hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \
- --hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \
- --hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \
- --hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \
- --hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \
- --hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \
- --hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \
- --hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \
- --hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \
- --hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \
- --hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \
- --hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \
- --hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \
- --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \
- --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7
+markdown-it-py==3.0.0 \
+ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \
+ --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb
+ # via rich
+markupsafe==2.1.3 \
+ --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \
+ --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \
+ --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \
+ --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \
+ --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \
+ --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \
+ --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \
+ --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \
+ --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \
+ --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \
+ --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \
+ --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \
+ --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \
+ --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \
+ --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \
+ --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \
+ --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \
+ --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \
+ --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \
+ --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \
+ --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \
+ --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \
+ --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \
+ --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \
+ --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \
+ --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \
+ --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \
+ --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \
+ --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \
+ --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \
+ --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \
+ --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \
+ --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \
+ --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \
+ --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \
+ --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \
+ --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \
+ --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \
+ --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \
+ --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \
+ --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \
+ --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \
+ --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \
+ --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \
+ --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \
+ --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \
+ --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \
+ --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \
+ --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \
+ --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \
+ --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \
+ --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \
+ --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \
+ --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \
+ --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \
+ --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \
+ --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \
+ --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \
+ --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \
+ --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11
# via jinja2
-more-itertools==9.0.0 \
- --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \
- --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab
+mdurl==0.1.2 \
+ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
+ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
+ # via markdown-it-py
+more-itertools==10.1.0 \
+ --hash=sha256:626c369fa0eb37bac0291bce8259b332fd59ac792fa5497b59837309cd5b114a \
+ --hash=sha256:64e0735fcfdc6f3464ea133afe8ea4483b1c5fe3a3d69852e6503b43a0b222e6
# via jaraco-classes
-nox==2022.11.21 \
- --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \
- --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684
+nh3==0.2.14 \
+ --hash=sha256:116c9515937f94f0057ef50ebcbcc10600860065953ba56f14473ff706371873 \
+ --hash=sha256:18415df36db9b001f71a42a3a5395db79cf23d556996090d293764436e98e8ad \
+ --hash=sha256:203cac86e313cf6486704d0ec620a992c8bc164c86d3a4fd3d761dd552d839b5 \
+ --hash=sha256:2b0be5c792bd43d0abef8ca39dd8acb3c0611052ce466d0401d51ea0d9aa7525 \
+ --hash=sha256:377aaf6a9e7c63962f367158d808c6a1344e2b4f83d071c43fbd631b75c4f0b2 \
+ --hash=sha256:525846c56c2bcd376f5eaee76063ebf33cf1e620c1498b2a40107f60cfc6054e \
+ --hash=sha256:5529a3bf99402c34056576d80ae5547123f1078da76aa99e8ed79e44fa67282d \
+ --hash=sha256:7771d43222b639a4cd9e341f870cee336b9d886de1ad9bec8dddab22fe1de450 \
+ --hash=sha256:88c753efbcdfc2644a5012938c6b9753f1c64a5723a67f0301ca43e7b85dcf0e \
+ --hash=sha256:93a943cfd3e33bd03f77b97baa11990148687877b74193bf777956b67054dcc6 \
+ --hash=sha256:9be2f68fb9a40d8440cbf34cbf40758aa7f6093160bfc7fb018cce8e424f0c3a \
+ --hash=sha256:a0c509894fd4dccdff557068e5074999ae3b75f4c5a2d6fb5415e782e25679c4 \
+ --hash=sha256:ac8056e937f264995a82bf0053ca898a1cb1c9efc7cd68fa07fe0060734df7e4 \
+ --hash=sha256:aed56a86daa43966dd790ba86d4b810b219f75b4bb737461b6886ce2bde38fd6 \
+ --hash=sha256:e8986f1dd3221d1e741fda0a12eaa4a273f1d80a35e31a1ffe579e7c621d069e \
+ --hash=sha256:f99212a81c62b5f22f9e7c3e347aa00491114a5647e1f13bbebd79c3e5f08d75
+ # via readme-renderer
+nox==2023.4.22 \
+ --hash=sha256:0b1adc619c58ab4fa57d6ab2e7823fe47a32e70202f287d78474adcc7bda1891 \
+ --hash=sha256:46c0560b0dc609d7d967dc99e22cb463d3c4caf54a5fda735d6c11b5177e3a9f
# via -r requirements.in
-packaging==21.3 \
- --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \
- --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522
+packaging==23.2 \
+ --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
+ --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7
# via
# gcp-releasetool
# nox
-pkginfo==1.8.3 \
- --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \
- --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c
+pkginfo==1.9.6 \
+ --hash=sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546 \
+ --hash=sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046
# via twine
-platformdirs==2.5.4 \
- --hash=sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7 \
- --hash=sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10
+platformdirs==3.11.0 \
+ --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \
+ --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e
# via virtualenv
protobuf==3.20.3 \
--hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \
@@ -378,34 +410,31 @@ protobuf==3.20.3 \
# gcp-docuploader
# gcp-releasetool
# google-api-core
-pyasn1==0.4.8 \
- --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \
- --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba
+ # googleapis-common-protos
+pyasn1==0.5.0 \
+ --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \
+ --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde
# via
# pyasn1-modules
# rsa
-pyasn1-modules==0.2.8 \
- --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \
- --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74
+pyasn1-modules==0.3.0 \
+ --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \
+ --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d
# via google-auth
pycparser==2.21 \
--hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \
--hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206
# via cffi
-pygments==2.13.0 \
- --hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \
- --hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42
+pygments==2.16.1 \
+ --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \
+ --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29
# via
# readme-renderer
# rich
-pyjwt==2.6.0 \
- --hash=sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd \
- --hash=sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14
+pyjwt==2.8.0 \
+ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \
+ --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320
# via gcp-releasetool
-pyparsing==3.0.9 \
- --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \
- --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc
- # via packaging
pyperclip==1.8.2 \
--hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57
# via gcp-releasetool
@@ -413,9 +442,9 @@ python-dateutil==2.8.2 \
--hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
--hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
# via gcp-releasetool
-readme-renderer==37.3 \
- --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \
- --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343
+readme-renderer==42.0 \
+ --hash=sha256:13d039515c1f24de668e2c93f2e877b9dbe6c6c32328b90a40a49d8b2b85f36d \
+ --hash=sha256:2d55489f83be4992fe4454939d1a051c33edbab778e82761d060c9fc6b308cd1
# via twine
requests==2.31.0 \
--hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
@@ -426,17 +455,17 @@ requests==2.31.0 \
# google-cloud-storage
# requests-toolbelt
# twine
-requests-toolbelt==0.10.1 \
- --hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \
- --hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d
+requests-toolbelt==1.0.0 \
+ --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \
+ --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06
# via twine
rfc3986==2.0.0 \
--hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \
--hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c
# via twine
-rich==12.6.0 \
- --hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \
- --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0
+rich==13.6.0 \
+ --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \
+ --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef
# via twine
rsa==4.9 \
--hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
@@ -450,43 +479,37 @@ six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
--hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
# via
- # bleach
# gcp-docuploader
- # google-auth
# python-dateutil
-twine==4.0.1 \
- --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \
- --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0
+twine==4.0.2 \
+ --hash=sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8 \
+ --hash=sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8
# via -r requirements.in
-typing-extensions==4.4.0 \
- --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
- --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
+typing-extensions==4.8.0 \
+ --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \
+ --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef
# via -r requirements.in
-urllib3==1.26.12 \
- --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \
- --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997
+urllib3==2.0.7 \
+ --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \
+ --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e
# via
# requests
# twine
-virtualenv==20.16.7 \
- --hash=sha256:8691e3ff9387f743e00f6bb20f70121f5e4f596cae754531f2b3b3a1b1ac696e \
- --hash=sha256:efd66b00386fdb7dbe4822d172303f40cd05e50e01740b19ea42425cbe653e29
+virtualenv==20.24.6 \
+ --hash=sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af \
+ --hash=sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381
# via nox
-webencodings==0.5.1 \
- --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \
- --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923
- # via bleach
-wheel==0.38.4 \
- --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \
- --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8
+wheel==0.41.3 \
+ --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \
+ --hash=sha256:4d4987ce51a49370ea65c0bfd2234e8ce80a12780820d9dc462597a6e60d0841
# via -r requirements.in
-zipp==3.10.0 \
- --hash=sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1 \
- --hash=sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8
+zipp==3.17.0 \
+ --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \
+ --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0
# via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
-setuptools==65.5.1 \
- --hash=sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31 \
- --hash=sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f
+setuptools==68.2.2 \
+ --hash=sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87 \
+ --hash=sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a
# via -r requirements.in
diff --git a/.kokoro/samples/python3.12/common.cfg b/.kokoro/samples/python3.12/common.cfg
new file mode 100644
index 000000000..34e0a95f3
--- /dev/null
+++ b/.kokoro/samples/python3.12/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.12"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-312"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigtable/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.12/continuous.cfg b/.kokoro/samples/python3.12/continuous.cfg
new file mode 100644
index 000000000..a1c8d9759
--- /dev/null
+++ b/.kokoro/samples/python3.12/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.12/periodic-head.cfg b/.kokoro/samples/python3.12/periodic-head.cfg
new file mode 100644
index 000000000..be25a34f9
--- /dev/null
+++ b/.kokoro/samples/python3.12/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-bigtable/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.12/periodic.cfg b/.kokoro/samples/python3.12/periodic.cfg
new file mode 100644
index 000000000..71cd1e597
--- /dev/null
+++ b/.kokoro/samples/python3.12/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/.kokoro/samples/python3.12/presubmit.cfg b/.kokoro/samples/python3.12/presubmit.cfg
new file mode 100644
index 000000000..a1c8d9759
--- /dev/null
+++ b/.kokoro/samples/python3.12/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh
index ba3a707b0..63ac41dfa 100755
--- a/.kokoro/test-samples-against-head.sh
+++ b/.kokoro/test-samples-against-head.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh
index 2c6500cae..5a0f5fab6 100755
--- a/.kokoro/test-samples-impl.sh
+++ b/.kokoro/test-samples-impl.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2021 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index 11c042d34..50b35a48c 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh
index a4241db23..5c7c8633a 100755
--- a/.kokoro/trampoline.sh
+++ b/.kokoro/trampoline.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 Google Inc.
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh
index 4af6cdc26..59a7cf3a9 100755
--- a/.kokoro/trampoline_v2.sh
+++ b/.kokoro/trampoline_v2.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5405cc8ff..6a8e16950 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,4 +1,4 @@
-# Copyright 2021 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,10 +22,10 @@ repos:
- id: end-of-file-fixer
- id: check-yaml
- repo: https://github.com/psf/black
- rev: 22.3.0
+ rev: 23.7.0
hooks:
- id: black
- repo: https://github.com/pycqa/flake8
- rev: 3.9.2
+ rev: 6.1.0
hooks:
- id: flake8
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b7f666a68..5be20145a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "2.19.0"
+ ".": "2.21.0"
}
\ No newline at end of file
diff --git a/.trampolinerc b/.trampolinerc
index 0eee72ab6..a7dfeb42c 100644
--- a/.trampolinerc
+++ b/.trampolinerc
@@ -1,4 +1,4 @@
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Template for .trampolinerc
-
# Add required env vars here.
required_envvars+=(
)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dc80386a4..1a2a6ad3a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,36 @@
[1]: https://pypi.org/project/google-cloud-bigtable/#history
+## [2.21.0](https://github.com/googleapis/python-bigtable/compare/v2.20.0...v2.21.0) (2023-08-02)
+
+
+### Features
+
+* Add last_scanned_row_responses to FeatureFlags ([#845](https://github.com/googleapis/python-bigtable/issues/845)) ([14a6739](https://github.com/googleapis/python-bigtable/commit/14a673901f82fa247c8027730a0bba41e0ec4757))
+
+
+### Documentation
+
+* Minor formatting ([#851](https://github.com/googleapis/python-bigtable/issues/851)) ([5ebe231](https://github.com/googleapis/python-bigtable/commit/5ebe2312dab70210811fca68c6625d2546442afd))
+
+## [2.20.0](https://github.com/googleapis/python-bigtable/compare/v2.19.0...v2.20.0) (2023-07-17)
+
+
+### Features
+
+* Add experimental reverse scan for public preview ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304))
+* Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304))
+
+
+### Bug Fixes
+
+* Add async context manager return types ([#828](https://github.com/googleapis/python-bigtable/issues/828)) ([475a160](https://github.com/googleapis/python-bigtable/commit/475a16072f3ad41357bdb765fff608a39141ec00))
+
+
+### Documentation
+
+* Fix formatting for reversed order field example ([#831](https://github.com/googleapis/python-bigtable/issues/831)) ([fddd0ba](https://github.com/googleapis/python-bigtable/commit/fddd0ba97155e112af92a98fd8f20e59b139d177))
+
## [2.19.0](https://github.com/googleapis/python-bigtable/compare/v2.18.1...v2.19.0) (2023-06-08)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 504fb3742..947c129b7 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@ In order to add a feature:
documentation.
- The feature must work fully on the following CPython versions:
- 3.7, 3.8, 3.9, 3.10 and 3.11 on both UNIX and Windows.
+ 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -72,7 +72,7 @@ We use `nox <https://nox.thea.codes/>`__ to instrument our tests.
- To run a single unit test::
-    $ nox -s unit-3.11 -- -k <name of test>
+    $ nox -s unit-3.12 -- -k <name of test>
.. note::
@@ -226,12 +226,14 @@ We support:
- `Python 3.9`_
- `Python 3.10`_
- `Python 3.11`_
+- `Python 3.12`_
.. _Python 3.7: https://docs.python.org/3.7/
.. _Python 3.8: https://docs.python.org/3.8/
.. _Python 3.9: https://docs.python.org/3.9/
.. _Python 3.10: https://docs.python.org/3.10/
.. _Python 3.11: https://docs.python.org/3.11/
+.. _Python 3.12: https://docs.python.org/3.12/
Supported versions can be found in our ``noxfile.py`` `config`_.
diff --git a/MANIFEST.in b/MANIFEST.in
index e783f4c62..e0a667053 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/docs/conf.py b/docs/conf.py
index 34f3a4d08..b5a870f58 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2021 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/docs/snippets.py b/docs/snippets.py
index 1d93fdf12..fa3aa3627 100644
--- a/docs/snippets.py
+++ b/docs/snippets.py
@@ -448,7 +448,6 @@ def test_bigtable_create_table():
def test_bigtable_list_tables():
-
# [START bigtable_api_list_tables]
from google.cloud.bigtable import Client
diff --git a/docs/snippets_table.py b/docs/snippets_table.py
index f27260425..893135275 100644
--- a/docs/snippets_table.py
+++ b/docs/snippets_table.py
@@ -964,7 +964,6 @@ def test_bigtable_create_family_gc_nested():
def test_bigtable_row_data_cells_cell_value_cell_values():
-
value = b"value_in_col1"
row = Config.TABLE.row(b"row_key_1")
row.set_cell(
diff --git a/google/__init__.py b/google/__init__.py
deleted file mode 100644
index a5ba80656..000000000
--- a/google/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- pass
diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py
deleted file mode 100644
index a5ba80656..000000000
--- a/google/cloud/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-try:
- import pkg_resources
-
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- pass
diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py
index a6eb806e9..f9b85386d 100644
--- a/google/cloud/bigtable/batcher.py
+++ b/google/cloud/bigtable/batcher.py
@@ -53,12 +53,19 @@ def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT
self.flush_count = flush_count
def get(self):
- """Retrieve an item from the queue. Recalculate queue size."""
- row = self._queue.get()
- mutation_size = row.get_mutations_size()
- self.total_mutation_count -= len(row._get_mutations())
- self.total_size -= mutation_size
- return row
+ """
+ Retrieve an item from the queue. Recalculate queue size.
+
+ If the queue is empty, return None.
+ """
+ try:
+ row = self._queue.get_nowait()
+ mutation_size = row.get_mutations_size()
+ self.total_mutation_count -= len(row._get_mutations())
+ self.total_size -= mutation_size
+ return row
+ except queue.Empty:
+ return None
def put(self, item):
"""Insert an item to the queue. Recalculate queue size."""
@@ -79,9 +86,6 @@ def full(self):
return True
return False
- def empty(self):
- return self._queue.empty()
-
@dataclass
class _BatchInfo:
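The two hunks above replace the `empty()`-then-`get()` pattern with a single non-blocking `get_nowait()` that returns None once the queue is drained, closing the check-then-act race between concurrent consumers. A stand-alone sketch of the same drain idiom (illustrative only, not part of the patch):

    import queue

    def drain(q: queue.Queue) -> list:
        """Drain a queue without the empty()/get() check-then-act race."""
        items = []
        while True:
            try:
                items.append(q.get_nowait())
            except queue.Empty:
                return items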
@@ -110,6 +114,7 @@ def __init__(
self.inflight_size = 0
self.event = threading.Event()
self.event.set()
+ self._lock = threading.Lock()
def is_blocked(self):
"""Returns True if:
@@ -128,8 +133,9 @@ def control_flow(self, batch_info):
Calculate the resources used by this batch
"""
- self.inflight_mutations += batch_info.mutations_count
- self.inflight_size += batch_info.mutations_size
+ with self._lock:
+ self.inflight_mutations += batch_info.mutations_count
+ self.inflight_size += batch_info.mutations_size
self.set_flow_control_status()
def wait(self):
@@ -154,8 +160,9 @@ def release(self, batch_info):
Release the resources.
Decrement the row size to allow enqueued mutations to be run.
"""
- self.inflight_mutations -= batch_info.mutations_count
- self.inflight_size -= batch_info.mutations_size
+ with self._lock:
+ self.inflight_mutations -= batch_info.mutations_count
+ self.inflight_size -= batch_info.mutations_size
self.set_flow_control_status()
@@ -292,8 +299,10 @@ def flush(self):
* :exc:`.MutationsBatchError` if there's any error in the mutations.
"""
rows_to_flush = []
- while not self._rows.empty():
- rows_to_flush.append(self._rows.get())
+ row = self._rows.get()
+ while row is not None:
+ rows_to_flush.append(row)
+ row = self._rows.get()
response = self._flush_rows(rows_to_flush)
return response
@@ -303,58 +312,68 @@ def _flush_async(self):
:raises:
* :exc:`.MutationsBatchError` if there's any error in the mutations.
"""
-
- rows_to_flush = []
- mutations_count = 0
- mutations_size = 0
- rows_count = 0
- batch_info = _BatchInfo()
-
- while not self._rows.empty():
- row = self._rows.get()
- mutations_count += len(row._get_mutations())
- mutations_size += row.get_mutations_size()
- rows_count += 1
- rows_to_flush.append(row)
- batch_info.mutations_count = mutations_count
- batch_info.rows_count = rows_count
- batch_info.mutations_size = mutations_size
-
- if (
- rows_count >= self.flush_count
- or mutations_size >= self.max_row_bytes
- or mutations_count >= self.flow_control.max_mutations
- or mutations_size >= self.flow_control.max_mutation_bytes
- or self._rows.empty() # submit when it reached the end of the queue
+ next_row = self._rows.get()
+ while next_row is not None:
+ # start a new batch
+ rows_to_flush = [next_row]
+ batch_info = _BatchInfo(
+ mutations_count=len(next_row._get_mutations()),
+ rows_count=1,
+ mutations_size=next_row.get_mutations_size(),
+ )
+ # fill up batch with rows
+ next_row = self._rows.get()
+ while next_row is not None and self._row_fits_in_batch(
+ next_row, batch_info
):
- # wait for resources to become available, before submitting any new batch
- self.flow_control.wait()
- # once unblocked, submit a batch
- # event flag will be set by control_flow to block subsequent thread, but not blocking this one
- self.flow_control.control_flow(batch_info)
- future = self._executor.submit(self._flush_rows, rows_to_flush)
- self.futures_mapping[future] = batch_info
- future.add_done_callback(self._batch_completed_callback)
-
- # reset and start a new batch
- rows_to_flush = []
- mutations_size = 0
- rows_count = 0
- mutations_count = 0
- batch_info = _BatchInfo()
+ rows_to_flush.append(next_row)
+ batch_info.mutations_count += len(next_row._get_mutations())
+ batch_info.rows_count += 1
+ batch_info.mutations_size += next_row.get_mutations_size()
+ next_row = self._rows.get()
+ # send batch over network
+ # wait for resources to become available
+ self.flow_control.wait()
+ # once unblocked, submit the batch
+            # event flag will be set by control_flow to block subsequent threads, but not this one
+ self.flow_control.control_flow(batch_info)
+ future = self._executor.submit(self._flush_rows, rows_to_flush)
+ # schedule release of resources from flow control
+ self.futures_mapping[future] = batch_info
+ future.add_done_callback(self._batch_completed_callback)
def _batch_completed_callback(self, future):
"""Callback for when the mutation has finished to clean up the current batch
and release items from the flow controller.
-
Raise exceptions if there are any.
Release the resources locked by the flow control and allow enqueued tasks to be run.
"""
-
processed_rows = self.futures_mapping[future]
self.flow_control.release(processed_rows)
del self.futures_mapping[future]
+ def _row_fits_in_batch(self, row, batch_info):
+ """Checks if a row can fit in the current batch.
+
+        :type row: :class:`~google.cloud.bigtable.row.DirectRow`
+        :param row: The row to check against the current batch limits.
+
+ :type batch_info: :class:`_BatchInfo`
+ :param batch_info: Information about the current batch.
+
+ :rtype: bool
+ :returns: True if the row can fit in the current batch.
+ """
+ new_rows_count = batch_info.rows_count + 1
+ new_mutations_count = batch_info.mutations_count + len(row._get_mutations())
+ new_mutations_size = batch_info.mutations_size + row.get_mutations_size()
+ return (
+ new_rows_count <= self.flush_count
+ and new_mutations_size <= self.max_row_bytes
+ and new_mutations_count <= self.flow_control.max_mutations
+ and new_mutations_size <= self.flow_control.max_mutation_bytes
+ )
+
def _flush_rows(self, rows_to_flush):
"""Mutate the specified rows.
diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py
index 0f1a446f3..e546bae05 100644
--- a/google/cloud/bigtable/gapic_version.py
+++ b/google/cloud/bigtable/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.19.0" # {x-release-please-version}
+__version__ = "2.21.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py
index 0ba93ec63..d26d79b3c 100644
--- a/google/cloud/bigtable_admin/__init__.py
+++ b/google/cloud/bigtable_admin/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -115,6 +115,8 @@
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
CheckConsistencyResponse,
)
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest
from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import (
CreateBackupMetadata,
)
@@ -242,6 +244,8 @@
"UpdateInstanceMetadata",
"CheckConsistencyRequest",
"CheckConsistencyResponse",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
"CreateBackupMetadata",
"CreateBackupRequest",
"CreateTableFromSnapshotMetadata",
diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py
index 0f1a446f3..e546bae05 100644
--- a/google/cloud/bigtable_admin/gapic_version.py
+++ b/google/cloud/bigtable_admin/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.19.0" # {x-release-please-version}
+__version__ = "2.21.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py
index c030ec1bd..811b956e0 100644
--- a/google/cloud/bigtable_admin_v2/__init__.py
+++ b/google/cloud/bigtable_admin_v2/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -51,6 +51,8 @@
from .types.bigtable_instance_admin import UpdateInstanceMetadata
from .types.bigtable_table_admin import CheckConsistencyRequest
from .types.bigtable_table_admin import CheckConsistencyResponse
+from .types.bigtable_table_admin import CopyBackupMetadata
+from .types.bigtable_table_admin import CopyBackupRequest
from .types.bigtable_table_admin import CreateBackupMetadata
from .types.bigtable_table_admin import CreateBackupRequest
from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata
@@ -116,6 +118,8 @@
"CheckConsistencyResponse",
"Cluster",
"ColumnFamily",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
"CreateAppProfileRequest",
"CreateBackupMetadata",
"CreateBackupRequest",
diff --git a/google/cloud/bigtable_admin_v2/gapic_metadata.json b/google/cloud/bigtable_admin_v2/gapic_metadata.json
index d797338cc..9b3426470 100644
--- a/google/cloud/bigtable_admin_v2/gapic_metadata.json
+++ b/google/cloud/bigtable_admin_v2/gapic_metadata.json
@@ -349,6 +349,11 @@
"check_consistency"
]
},
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
"CreateBackup": {
"methods": [
"create_backup"
@@ -474,6 +479,11 @@
"check_consistency"
]
},
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
"CreateBackup": {
"methods": [
"create_backup"
@@ -599,6 +609,11 @@
"check_consistency"
]
},
+ "CopyBackup": {
+ "methods": [
+ "copy_backup"
+ ]
+ },
"CreateBackup": {
"methods": [
"create_backup"
diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py
index 0f1a446f3..e546bae05 100644
--- a/google/cloud/bigtable_admin_v2/gapic_version.py
+++ b/google/cloud/bigtable_admin_v2/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.19.0" # {x-release-please-version}
+__version__ = "2.21.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin_v2/services/__init__.py b/google/cloud/bigtable_admin_v2/services/__init__.py
index e8e1c3845..89a37dc92 100644
--- a/google/cloud/bigtable_admin_v2/services/__init__.py
+++ b/google/cloud/bigtable_admin_v2/services/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py
index 1fb10736e..40631d1b4 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
index 12811bcea..e4c4639af 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,14 +33,14 @@
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
-from google.api_core import retry as retries
+from google.api_core import retry_async as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
- OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
- OptionalRetry = Union[retries.Retry, object] # type: ignore
+ OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -305,7 +305,7 @@ async def create_instance(
This corresponds to the ``clusters`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -404,7 +404,7 @@ async def get_instance(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -440,7 +440,7 @@ async def get_instance(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_instance,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -496,7 +496,7 @@ async def list_instances(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -529,7 +529,7 @@ async def list_instances(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_instances,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -581,7 +581,7 @@ async def update_instance(
served from all
[Clusters][google.bigtable.admin.v2.Cluster] in the
instance.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -603,7 +603,7 @@ async def update_instance(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_instance,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -669,7 +669,7 @@ async def partial_update_instance(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -709,7 +709,7 @@ async def partial_update_instance(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.partial_update_instance,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -775,7 +775,7 @@ async def delete_instance(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -869,7 +869,7 @@ async def create_cluster(
This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -964,7 +964,7 @@ async def get_cluster(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -999,7 +999,7 @@ async def get_cluster(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_cluster,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1057,7 +1057,7 @@ async def list_clusters(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1090,7 +1090,7 @@ async def list_clusters(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_clusters,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1141,7 +1141,7 @@ async def update_cluster(
location, capable of serving all
[Tables][google.bigtable.admin.v2.Table] in the parent
[Instance][google.bigtable.admin.v2.Instance].
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1164,7 +1164,7 @@ async def update_cluster(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_cluster,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1248,7 +1248,7 @@ async def partial_update_cluster(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1343,7 +1343,7 @@ async def delete_cluster(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1431,7 +1431,7 @@ async def create_app_profile(
This corresponds to the ``app_profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1515,7 +1515,7 @@ async def get_app_profile(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1549,7 +1549,7 @@ async def get_app_profile(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_app_profile,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1608,7 +1608,7 @@ async def list_app_profiles(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1644,7 +1644,7 @@ async def list_app_profiles(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_app_profiles,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1717,7 +1717,7 @@ async def update_app_profile(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1754,7 +1754,7 @@ async def update_app_profile(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_app_profile,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1820,7 +1820,7 @@ async def delete_app_profile(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1890,7 +1890,7 @@ async def get_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1919,42 +1919,11 @@ async def get_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -1984,7 +1953,7 @@ async def get_iam_policy(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_iam_policy,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -2039,7 +2008,7 @@ async def set_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2068,42 +2037,11 @@ async def set_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -2188,7 +2126,7 @@ async def test_iam_permissions(
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2222,7 +2160,7 @@ async def test_iam_permissions(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.test_iam_permissions,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -2279,7 +2217,7 @@ async def list_hot_tablets(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2315,7 +2253,7 @@ async def list_hot_tablets(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_hot_tablets,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -2355,7 +2293,7 @@ async def list_hot_tablets(
# Done; return the response.
return response
- async def __aenter__(self):
+ async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient":
return self
async def __aexit__(self, exc_type, exc, tb):
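From a caller's perspective, the changes in this file mean the async methods now document and accept `google.api_core.retry_async.AsyncRetry`, and `__aenter__` is typed to return the client itself. A minimal sketch under those assumptions, with a placeholder resource name:

    import asyncio
    from google.api_core import retry_async
    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminAsyncClient

    async def main():
        # __aenter__ now returns the client type, so `client` is inferred
        async with BigtableInstanceAdminAsyncClient() as client:
            instance = await client.get_instance(
                name="projects/p/instances/i",
                retry=retry_async.AsyncRetry(initial=1.0, maximum=60.0, multiplier=2),
            )
            print(instance.display_name)

    asyncio.run(main())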
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
index ecc9bf1e2..52c61ea4f 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -2143,42 +2143,11 @@ def get_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -2279,42 +2248,11 @@ def set_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
index bfcbbf23d..0d646a96e 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py
index e5637c0da..62da28c88 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
index bd45f319f..d92d25453 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
index f037f5a44..eca37957d 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
index 82b03b0bb..145aa427d 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
index e9b94cf78..9d5502b7e 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -44,8 +44,8 @@
from google.cloud.bigtable_admin_v2.types import instance
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
from .base import (
BigtableInstanceAdminTransport,
@@ -1612,54 +1612,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
@@ -2439,54 +2439,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
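A short sketch connecting the policy docstrings above to actual calls; the binding and role are illustrative only, and the resource name is a placeholder:

    from google.iam.v1 import iam_policy_pb2, policy_pb2
    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

    client = BigtableInstanceAdminClient()
    resource = "projects/p/instances/i"
    policy = client.get_iam_policy(
        request=iam_policy_pb2.GetIamPolicyRequest(resource=resource)
    )
    policy.bindings.append(
        policy_pb2.Binding(role="roles/bigtable.user", members=["user:eve@example.com"])
    )
    updated = client.set_iam_policy(
        request=iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
    )
    print(updated.etag)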
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py
index 515696537..544649e90 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
index 1663c16eb..5a4435bde 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,14 +33,14 @@
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
-from google.api_core import retry as retries
+from google.api_core import retry_async as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
- OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
- OptionalRetry = Union[retries.Retry, object] # type: ignore
+ OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
@@ -282,7 +282,7 @@ async def create_table(
This corresponds to the ``table`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -358,6 +358,7 @@ async def create_table_from_snapshot(
r"""Creates a new table from the specified snapshot. The
target table must not exist. The snapshot and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -401,7 +402,7 @@ async def create_table_from_snapshot(
This corresponds to the ``source_snapshot`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -493,7 +494,7 @@ async def list_tables(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -529,7 +530,7 @@ async def list_tables(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_tables,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -592,7 +593,7 @@ async def get_table(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -627,7 +628,7 @@ async def get_table(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_table,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -700,7 +701,7 @@ async def update_table(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -793,7 +794,7 @@ async def delete_table(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -864,7 +865,7 @@ async def undelete_table(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -975,7 +976,7 @@ async def modify_column_families(
This corresponds to the ``modifications`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1050,7 +1051,7 @@ async def drop_row_range(
request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]):
The request object. Request message for
[google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1110,7 +1111,7 @@ async def generate_consistency_token(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1143,7 +1144,7 @@ async def generate_consistency_token(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.generate_consistency_token,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1210,7 +1211,7 @@ async def check_consistency(
This corresponds to the ``consistency_token`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1245,7 +1246,7 @@ async def check_consistency(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.check_consistency,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1293,6 +1294,7 @@ async def snapshot_table(
r"""Creates a new snapshot in the specified cluster from
the specified source table. The cluster and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -1342,7 +1344,7 @@ async def snapshot_table(
This corresponds to the ``description`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1457,7 +1459,7 @@ async def get_snapshot(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1469,6 +1471,7 @@ async def get_snapshot(
time. A snapshot can be used as a
checkpoint for data restoration or a
data source for a new table.
+
Note: This is a private alpha release of
Cloud Bigtable snapshots. This feature
is not currently available to most Cloud
@@ -1500,7 +1503,7 @@ async def get_snapshot(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_snapshot,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1573,7 +1576,7 @@ async def list_snapshots(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1616,7 +1619,7 @@ async def list_snapshots(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_snapshots,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1668,6 +1671,7 @@ async def delete_snapshot(
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Permanently deletes the specified snapshot.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -1694,7 +1698,7 @@ async def delete_snapshot(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1790,7 +1794,7 @@ async def create_backup(
This corresponds to the ``backup`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1882,7 +1886,7 @@ async def get_backup(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1913,7 +1917,7 @@ async def get_backup(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_backup,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -1983,7 +1987,7 @@ async def update_backup(
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2062,7 +2066,7 @@ async def delete_backup(
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2134,7 +2138,7 @@ async def list_backups(
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2143,7 +2147,7 @@ async def list_backups(
Returns:
google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager:
The response for
- [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
+ [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
@@ -2170,7 +2174,7 @@ async def list_backups(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_backups,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -2218,9 +2222,8 @@ async def restore_table(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
- r"""Create a new table by restoring from a completed backup. The new
- table must be in the same project as the instance containing the
- backup. The returned table [long-running
+ r"""Create a new table by restoring from a completed backup. The
+ returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
@@ -2232,7 +2235,7 @@ async def restore_table(
request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]):
The request object. The request for
[RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2283,6 +2286,141 @@ async def restore_table(
# Done; return the response.
return response
+ async def copy_backup(
+ self,
+ request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Copy a Cloud Bigtable backup to a new backup in the
+ destination cluster located in the destination instance
+ and project.
+
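+        A minimal usage sketch (illustrative only, not generated sample
+        code; the resource names are placeholders):
+
+        .. code-block:: python
+
+            from google.cloud import bigtable_admin_v2
+            from google.protobuf import timestamp_pb2
+
+            async def sample_copy_backup():
+                client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+                operation = await client.copy_backup(
+                    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
+                    backup_id="my-backup-copy",
+                    source_backup="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
+                    # Must be 6 hours to 30 days out; fixed here only for brevity.
+                    expire_time=timestamp_pb2.Timestamp(seconds=1893456000),
+                )
+
+                # Block until the copy finishes and the new Backup is ready.
+                response = await operation.result()
+                print(response)
+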
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]):
+ The request object. The request for
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+ parent (:class:`str`):
+ Required. The name of the destination cluster that will
+                contain the backup copy. The cluster must already
+                exist. Values are of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (:class:`str`):
+ Required. The id of the new backup. The ``backup_id``
+ along with ``parent`` are combined as
+ {parent}/backups/{backup_id} to create the full backup
+ name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in
+                length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_backup (:class:`str`):
+ Required. The source backup to be copied from. The
+ source backup needs to be in READY state for it to be
+ copied. Copying a copied backup is not allowed. Once
+ CopyBackup is in progress, the source backup cannot be
+ deleted or cleaned up on expiration until CopyBackup is
+ finished. Values are of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
+                Required. The expiration time of the copied
+ backup with microsecond granularity that must be at
+ least 6 hours and at most 30 days from the time the
+ request is received. Once the ``expire_time`` has
+ passed, Cloud Bigtable will delete the backup and free
+ the resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.Backup` A
+ backup of a Cloud Bigtable table.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, backup_id, source_backup, expire_time])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = bigtable_table_admin.CopyBackupRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_id is not None:
+ request.backup_id = backup_id
+ if source_backup is not None:
+ request.source_backup = source_backup
+ if expire_time is not None:
+ request.expire_time = expire_time
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.copy_backup,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ table.Backup,
+ metadata_type=bigtable_table_admin.CopyBackupMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
async def get_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
@@ -2308,7 +2446,7 @@ async def get_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2337,42 +2475,11 @@ async def get_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -2402,7 +2509,7 @@ async def get_iam_policy(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_iam_policy,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -2457,7 +2564,7 @@ async def set_iam_policy(
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2486,42 +2593,11 @@ async def set_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -2606,7 +2682,7 @@ async def test_iam_permissions(
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -2640,7 +2716,7 @@ async def test_iam_permissions(
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.test_iam_permissions,
- default_retry=retries.Retry(
+ default_retry=retries.AsyncRetry(
initial=1.0,
maximum=60.0,
multiplier=2,
@@ -2671,7 +2747,7 @@ async def test_iam_permissions(
# Done; return the response.
return response
- async def __aenter__(self):
+ async def __aenter__(self) -> "BigtableTableAdminAsyncClient":
return self
async def __aexit__(self, exc_type, exc, tb):
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
index e043aa224..d0c04ed11 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -685,6 +685,7 @@ def create_table_from_snapshot(
r"""Creates a new table from the specified snapshot. The
target table must not exist. The snapshot and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -1587,6 +1588,7 @@ def snapshot_table(
r"""Creates a new snapshot in the specified cluster from
the specified source table. The cluster and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -1763,6 +1765,7 @@ def get_snapshot(
time. A snapshot can be used as a
checkpoint for data restoration or a
data source for a new table.
+
Note: This is a private alpha release of
Cloud Bigtable snapshots. This feature
is not currently available to most Cloud
@@ -1942,6 +1945,7 @@ def delete_snapshot(
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Permanently deletes the specified snapshot.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -2407,7 +2411,7 @@ def list_backups(
Returns:
google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager:
The response for
- [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
+ [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
Iterating over this object will yield results and
resolve additional pages automatically.
@@ -2472,9 +2476,8 @@ def restore_table(
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
- r"""Create a new table by restoring from a completed backup. The new
- table must be in the same project as the instance containing the
- backup. The returned table [long-running
+ r"""Create a new table by restoring from a completed backup. The
+ returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
@@ -2538,6 +2541,141 @@ def restore_table(
# Done; return the response.
return response
+ def copy_backup(
+ self,
+ request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation.Operation:
+ r"""Copy a Cloud Bigtable backup to a new backup in the
+ destination cluster located in the destination instance
+ and project.
+
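+        A minimal usage sketch (illustrative only, not generated sample
+        code; the resource names are placeholders):
+
+        .. code-block:: python
+
+            from google.cloud import bigtable_admin_v2
+            from google.protobuf import timestamp_pb2
+
+            def sample_copy_backup():
+                client = bigtable_admin_v2.BigtableTableAdminClient()
+
+                operation = client.copy_backup(
+                    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
+                    backup_id="my-backup-copy",
+                    source_backup="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
+                    # Must be 6 hours to 30 days out; fixed here only for brevity.
+                    expire_time=timestamp_pb2.Timestamp(seconds=1893456000),
+                )
+
+                # result() blocks until the copy completes.
+                response = operation.result()
+                print(response)
+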
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]):
+ The request object. The request for
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+ parent (str):
+ Required. The name of the destination cluster that will
+                contain the backup copy. The cluster must already
+                exist. Values are of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (str):
+ Required. The id of the new backup. The ``backup_id``
+ along with ``parent`` are combined as
+ {parent}/backups/{backup_id} to create the full backup
+ name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in
+                length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_backup (str):
+ Required. The source backup to be copied from. The
+ source backup needs to be in READY state for it to be
+ copied. Copying a copied backup is not allowed. Once
+ CopyBackup is in progress, the source backup cannot be
+ deleted or cleaned up on expiration until CopyBackup is
+ finished. Values are of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+                Required. The expiration time of the copied
+ backup with microsecond granularity that must be at
+ least 6 hours and at most 30 days from the time the
+ request is received. Once the ``expire_time`` has
+ passed, Cloud Bigtable will delete the backup and free
+ the resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.Backup` A
+ backup of a Cloud Bigtable table.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, backup_id, source_backup, expire_time])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a bigtable_table_admin.CopyBackupRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, bigtable_table_admin.CopyBackupRequest):
+ request = bigtable_table_admin.CopyBackupRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_id is not None:
+ request.backup_id = backup_id
+ if source_backup is not None:
+ request.source_backup = source_backup
+ if expire_time is not None:
+ request.expire_time = expire_time
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.copy_backup]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ table.Backup,
+ metadata_type=bigtable_table_admin.CopyBackupMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
def get_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
@@ -2592,42 +2730,11 @@ def get_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
@@ -2728,42 +2835,11 @@ def set_iam_policy(
**JSON example:**
- {
- "bindings": [
- {
- "role":
- "roles/resourcemanager.organizationAdmin",
- "members": [ "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
-
- }, { "role":
- "roles/resourcemanager.organizationViewer",
- "members": [ "user:eve@example.com" ],
- "condition": { "title": "expirable access",
- "description": "Does not grant access after
- Sep 2020", "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')", } }
-
- ], "etag": "BwWWja0YfJA=", "version": 3
-
- }
+ :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`
**YAML example:**
- bindings: - members: - user:\ mike@example.com -
- group:\ admins@example.com - domain:google.com -
- serviceAccount:\ my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin -
- members: - user:\ eve@example.com role:
- roles/resourcemanager.organizationViewer
- condition: title: expirable access description:
- Does not grant access after Sep 2020 expression:
- request.time <
- timestamp('2020-10-01T00:00:00.000Z') etag:
- BwWWja0YfJA= version: 3
+ :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`
For a description of IAM and its features, see the
[IAM
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
index e639227df..331647b4c 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py
index 585b4e437..be4aa8d2a 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
index cade1335b..c3cf01a96 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -322,6 +322,11 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=60.0,
client_info=client_info,
),
+ self.copy_backup: gapic_v1.method.wrap_method(
+ self.copy_backup,
+ default_timeout=None,
+ client_info=client_info,
+ ),
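+            # Hedged note: CopyBackup is wrapped with no default retry and no
+            # default timeout, so only per-call ``retry``/``timeout`` arguments
+            # (if any) govern this long-running RPC.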
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy,
default_retry=retries.Retry(
@@ -577,6 +582,15 @@ def restore_table(
]:
raise NotImplementedError()
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.CopyBackupRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
@property
def get_iam_policy(
self,
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
index f8cf9f834..d765869cd 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -294,6 +294,7 @@ def create_table_from_snapshot(
Creates a new table from the specified snapshot. The
target table must not exist. The snapshot and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -591,6 +592,7 @@ def snapshot_table(
Creates a new snapshot in the specified cluster from
the specified source table. The cluster and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -692,6 +694,7 @@ def delete_snapshot(
r"""Return a callable for the delete snapshot method over gRPC.
Permanently deletes the specified snapshot.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -866,9 +869,8 @@ def restore_table(
) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]:
r"""Return a callable for the restore table method over gRPC.
- Create a new table by restoring from a completed backup. The new
- table must be in the same project as the instance containing the
- backup. The returned table [long-running
+ Create a new table by restoring from a completed backup. The
+ returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
@@ -894,6 +896,34 @@ def restore_table(
)
return self._stubs["restore_table"]
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]:
+ r"""Return a callable for the copy backup method over gRPC.
+
+ Copy a Cloud Bigtable backup to a new backup in the
+ destination cluster located in the destination instance
+ and project.
+
+ Returns:
+ Callable[[~.CopyBackupRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "copy_backup" not in self._stubs:
+ self._stubs["copy_backup"] = self.grpc_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup",
+ request_serializer=bigtable_table_admin.CopyBackupRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["copy_backup"]
+
@property
def get_iam_policy(
self,
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
index 54eb7e524..b60a7351c 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -302,6 +302,7 @@ def create_table_from_snapshot(
Creates a new table from the specified snapshot. The
target table must not exist. The snapshot and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -607,6 +608,7 @@ def snapshot_table(
Creates a new snapshot in the specified cluster from
the specified source table. The cluster and the table
must be in the same instance.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -710,6 +712,7 @@ def delete_snapshot(
r"""Return a callable for the delete snapshot method over gRPC.
Permanently deletes the specified snapshot.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
@@ -890,9 +893,8 @@ def restore_table(
]:
r"""Return a callable for the restore table method over gRPC.
- Create a new table by restoring from a completed backup. The new
- table must be in the same project as the instance containing the
- backup. The returned table [long-running
+ Create a new table by restoring from a completed backup. The
+ returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
@@ -918,6 +920,36 @@ def restore_table(
)
return self._stubs["restore_table"]
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[
+ [bigtable_table_admin.CopyBackupRequest], Awaitable[operations_pb2.Operation]
+ ]:
+ r"""Return a callable for the copy backup method over gRPC.
+
+ Copy a Cloud Bigtable backup to a new backup in the
+ destination cluster located in the destination instance
+ and project.
+
+ Returns:
+ Callable[[~.CopyBackupRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "copy_backup" not in self._stubs:
+ self._stubs["copy_backup"] = self.grpc_channel.unary_unary(
+ "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup",
+ request_serializer=bigtable_table_admin.CopyBackupRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["copy_backup"]
+
@property
def get_iam_policy(
self,
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
index 4d5b2ed1c..41b893eb7 100644
--- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
+++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -45,8 +45,8 @@
from google.cloud.bigtable_admin_v2.types import table as gba_table
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
+from google.longrunning import operations_pb2 # type: ignore
from .base import (
BigtableTableAdminTransport,
@@ -84,6 +84,14 @@ def post_check_consistency(self, response):
logging.log(f"Received response: {response}")
return response
+ def pre_copy_backup(self, request, metadata):
+ logging.log(f"Received request: {request}")
+ return request, metadata
+
+ def post_copy_backup(self, response):
+ logging.log(f"Received response: {response}")
+ return response
+
def pre_create_backup(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
@@ -281,6 +289,29 @@ def post_check_consistency(
"""
return response
+ def pre_copy_backup(
+ self,
+ request: bigtable_table_admin.CopyBackupRequest,
+ metadata: Sequence[Tuple[str, str]],
+ ) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]:
+ """Pre-rpc interceptor for copy_backup
+
+ Override in a subclass to manipulate the request or metadata
+ before they are sent to the BigtableTableAdmin server.
+ """
+ return request, metadata
+
+ def post_copy_backup(
+ self, response: operations_pb2.Operation
+ ) -> operations_pb2.Operation:
+ """Post-rpc interceptor for copy_backup
+
+ Override in a subclass to manipulate the response
+ after it is returned by the BigtableTableAdmin server but before
+ it is returned to user code.
+ """
+ return response
+
def pre_create_backup(
self,
request: bigtable_table_admin.CreateBackupRequest,
@@ -1010,6 +1041,103 @@ def __call__(
resp = self._interceptor.post_check_consistency(resp)
return resp
+ class _CopyBackup(BigtableTableAdminRestStub):
+ def __hash__(self):
+ return hash("CopyBackup")
+
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+ @classmethod
+ def _get_unset_required_fields(cls, message_dict):
+ return {
+ k: v
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+ if k not in message_dict
+ }
+
+ def __call__(
+ self,
+ request: bigtable_table_admin.CopyBackupRequest,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Optional[float] = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operations_pb2.Operation:
+ r"""Call the copy backup method over HTTP.
+
+ Args:
+ request (~.bigtable_table_admin.CopyBackupRequest):
+ The request object. The request for
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operations_pb2.Operation:
+ This resource represents a
+ long-running operation that is the
+ result of a network API call.
+
+ """
+
+ http_options: List[Dict[str, str]] = [
+ {
+ "method": "post",
+ "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy",
+ "body": "*",
+ },
+ ]
+ request, metadata = self._interceptor.pre_copy_backup(request, metadata)
+ pb_request = bigtable_table_admin.CopyBackupRequest.pb(request)
+ transcoded_request = path_template.transcode(http_options, pb_request)
+
+ # Jsonify the request body
+
+ body = json_format.MessageToJson(
+ transcoded_request["body"],
+ including_default_value_fields=False,
+ use_integers_for_enums=True,
+ )
+ uri = transcoded_request["uri"]
+ method = transcoded_request["method"]
+
+ # Jsonify the query params
+ query_params = json.loads(
+ json_format.MessageToJson(
+ transcoded_request["query_params"],
+ including_default_value_fields=False,
+ use_integers_for_enums=True,
+ )
+ )
+ query_params.update(self._get_unset_required_fields(query_params))
+
+ query_params["$alt"] = "json;enum-encoding=int"
+
+ # Send the request
+ headers = dict(metadata)
+ headers["Content-Type"] = "application/json"
+ response = getattr(self._session, method)(
+ "{host}{uri}".format(host=self._host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
+ data=body,
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ resp = operations_pb2.Operation()
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
+ resp = self._interceptor.post_copy_backup(resp)
+ return resp
+
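+    # For orientation, a hedged sketch of the HTTP exchange that _CopyBackup
+    # above produces (identifiers are placeholders):
+    #
+    #   POST https://bigtableadmin.googleapis.com/v2/projects/my-project/instances/my-instance/clusters/my-cluster/backups:copy
+    #   {
+    #     "backupId": "my-backup-copy",
+    #     "sourceBackup": "projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
+    #     "expireTime": "2030-01-01T00:00:00Z"
+    #   }
+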
class _CreateBackup(BigtableTableAdminRestStub):
def __hash__(self):
return hash("CreateBackup")
@@ -1881,54 +2009,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
@@ -2044,6 +2172,7 @@ def __call__(
time. A snapshot can be used as a
checkpoint for data restoration or a
data source for a new table.
+
Note: This is a private alpha release of
Cloud Bigtable snapshots. This feature
is not currently available to most Cloud
@@ -2733,54 +2862,54 @@ def __call__(
::
- {
- "bindings": [
- {
- "role": "roles/resourcemanager.organizationAdmin",
- "members": [
- "user:mike@example.com",
- "group:admins@example.com",
- "domain:google.com",
- "serviceAccount:my-project-id@appspot.gserviceaccount.com"
- ]
- },
- {
- "role": "roles/resourcemanager.organizationViewer",
- "members": [
- "user:eve@example.com"
- ],
- "condition": {
- "title": "expirable access",
- "description": "Does not grant access after Sep 2020",
- "expression": "request.time <
- timestamp('2020-10-01T00:00:00.000Z')",
- }
- }
- ],
- "etag": "BwWWja0YfJA=",
- "version": 3
- }
+ {
+ "bindings": [
+ {
+ "role": "roles/resourcemanager.organizationAdmin",
+ "members": [
+ "user:mike@example.com",
+ "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ]
+ },
+ {
+ "role": "roles/resourcemanager.organizationViewer",
+ "members": [
+ "user:eve@example.com"
+ ],
+ "condition": {
+ "title": "expirable access",
+ "description": "Does not grant access after Sep 2020",
+ "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')",
+ }
+ }
+ ],
+ "etag": "BwWWja0YfJA=",
+ "version": 3
+ }
**YAML example:**
::
- bindings:
- - members:
- - user:mike@example.com
- - group:admins@example.com
- - domain:google.com
- - serviceAccount:my-project-id@appspot.gserviceaccount.com
- role: roles/resourcemanager.organizationAdmin
- - members:
- - user:eve@example.com
- role: roles/resourcemanager.organizationViewer
- condition:
- title: expirable access
- description: Does not grant access after Sep 2020
- expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
- etag: BwWWja0YfJA=
- version: 3
+ bindings:
+ - members:
+ - user:mike@example.com
+ - group:admins@example.com
+ - domain:google.com
+ - serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin
+ - members:
+ - user:eve@example.com
+ role: roles/resourcemanager.organizationViewer
+ condition:
+ title: expirable access
+ description: Does not grant access after Sep 2020
+ expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+ etag: BwWWja0YfJA=
+ version: 3
For a description of IAM and its features, see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
@@ -3360,6 +3489,14 @@ def check_consistency(
# In C++ this would require a dynamic_cast
return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore
+ @property
+ def copy_backup(
+ self,
+ ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]:
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
+ # In C++ this would require a dynamic_cast
+ return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore
+
@property
def create_backup(
self,
diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py
index 69153c9fc..a2fefffc8 100644
--- a/google/cloud/bigtable_admin_v2/types/__init__.py
+++ b/google/cloud/bigtable_admin_v2/types/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -44,6 +44,8 @@
from .bigtable_table_admin import (
CheckConsistencyRequest,
CheckConsistencyResponse,
+ CopyBackupMetadata,
+ CopyBackupRequest,
CreateBackupMetadata,
CreateBackupRequest,
CreateTableFromSnapshotMetadata,
@@ -130,6 +132,8 @@
"UpdateInstanceMetadata",
"CheckConsistencyRequest",
"CheckConsistencyResponse",
+ "CopyBackupMetadata",
+ "CopyBackupRequest",
"CreateBackupMetadata",
"CreateBackupRequest",
"CreateTableFromSnapshotMetadata",
diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py
index a22543354..87332a351 100644
--- a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py
+++ b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
index 4c4b9e9e2..6a3b31a1e 100644
--- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
+++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -62,6 +62,8 @@
"DeleteBackupRequest",
"ListBackupsRequest",
"ListBackupsResponse",
+ "CopyBackupRequest",
+ "CopyBackupMetadata",
},
)
@@ -76,8 +78,7 @@ class RestoreTableRequest(proto.Message):
Attributes:
parent (str):
Required. The name of the instance in which to create the
- restored table. This instance must be in the same project as
- the source backup. Values are of the form
+ restored table. Values are of the form
            ``projects/{project}/instances/{instance}``.
table_id (str):
Required. The id of the table to create and restore to. This
@@ -359,7 +360,7 @@ class ListTablesRequest(proto.Message):
should be listed. Values are of the form
``projects/{project}/instances/{instance}``.
view (google.cloud.bigtable_admin_v2.types.Table.View):
- The view to be applied to the returned tables' fields. Only
+ The view to be applied to the returned tables' fields.
NAME_ONLY view (default) and REPLICATION_VIEW are supported.
page_size (int):
Maximum number of results per page.
@@ -917,6 +918,7 @@ class DeleteSnapshotRequest(proto.Message):
class SnapshotTableMetadata(proto.Message):
r"""The metadata for the Operation returned by SnapshotTable.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to most Cloud
Bigtable customers. This feature might be changed in
@@ -1192,8 +1194,15 @@ class ListBackupsRequest(proto.Message):
fields in [Backup][google.bigtable.admin.v2.Backup]. The
full syntax is described at https://aip.dev/132#ordering.
- Fields supported are: \* name \* source_table \* expire_time
- \* start_time \* end_time \* size_bytes \* state
+ Fields supported are:
+
+ - name
+ - source_table
+ - expire_time
+ - start_time
+ - end_time
+ - size_bytes
+ - state
For example, "start_time". The default sorting order is
ascending. To specify descending order for the field, a
@@ -1266,4 +1275,90 @@ def raw_page(self):
)
+class CopyBackupRequest(proto.Message):
+ r"""The request for
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+
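+    Such a request can be built directly from these fields, for example
+    (an illustrative sketch; resource names are placeholders)::
+
+        from google.protobuf import timestamp_pb2
+
+        request = CopyBackupRequest(
+            parent="projects/my-project/instances/my-instance/clusters/my-cluster",
+            backup_id="my-backup-copy",
+            source_backup="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
+            expire_time=timestamp_pb2.Timestamp(seconds=1893456000),
+        )
+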
+ Attributes:
+ parent (str):
+ Required. The name of the destination cluster that will
+            contain the backup copy. The cluster must already exist.
+ Values are of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+ backup_id (str):
+ Required. The id of the new backup. The ``backup_id`` along
+ with ``parent`` are combined as {parent}/backups/{backup_id}
+ to create the full backup name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in length
+            and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.
+ source_backup (str):
+ Required. The source backup to be copied from. The source
+ backup needs to be in READY state for it to be copied.
+ Copying a copied backup is not allowed. Once CopyBackup is
+ in progress, the source backup cannot be deleted or cleaned
+ up on expiration until CopyBackup is finished. Values are of
+ the form:
+            ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+            Required. The expiration time of the copied backup
+ with microsecond granularity that must be at least 6 hours
+ and at most 30 days from the time the request is received.
+ Once the ``expire_time`` has passed, Cloud Bigtable will
+ delete the backup and free the resources used by the backup.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ backup_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class CopyBackupMetadata(proto.Message):
+ r"""Metadata type for the google.longrunning.Operation returned by
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+
+ Attributes:
+ name (str):
+ The name of the backup being created through the copy
+ operation. Values are of the form
+            ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+ source_backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo):
+ Information about the source backup that is
+ being copied from.
+ progress (google.cloud.bigtable_admin_v2.types.OperationProgress):
+ The progress of the
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]
+ operation.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ source_backup_info: gba_table.BackupInfo = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=gba_table.BackupInfo,
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=common.OperationProgress,
+ )
+
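+# Hedged usage sketch: while a copy is running, the metadata of the returned
+# long-running operation deserializes to CopyBackupMetadata, e.g.:
+#
+#   md = operation.metadata  # a CopyBackupMetadata instance
+#   print(md.progress.progress_percent)
+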
+
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_admin_v2/types/common.py b/google/cloud/bigtable_admin_v2/types/common.py
index 2cc71fc43..959b9deb1 100644
--- a/google/cloud/bigtable_admin_v2/types/common.py
+++ b/google/cloud/bigtable_admin_v2/types/common.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py
index 2b5d81636..78efd711b 100644
--- a/google/cloud/bigtable_admin_v2/types/instance.py
+++ b/google/cloud/bigtable_admin_v2/types/instance.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -173,7 +173,7 @@ class AutoscalingTargets(proto.Message):
The storage utilization that the Autoscaler should be trying
to achieve. This number is limited between 2560 (2.5TiB) and
5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and
- 16384 (16TiB) for an HDD cluster; otherwise it will return
+ 16384 (16TiB) for an HDD cluster, otherwise it will return
INVALID_ARGUMENT error. If this value is set to 0, it will
be treated as if it were set to the default value: 2560 for
SSD, 8192 for HDD.
@@ -419,8 +419,43 @@ class AppProfile(proto.Message):
Use a single-cluster routing policy.
This field is a member of `oneof`_ ``routing_policy``.
+ priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority):
+ This field has been deprecated in favor of
+ ``standard_isolation.priority``. If you set this field,
+ ``standard_isolation.priority`` will be set instead.
+
+ The priority of requests sent using this app profile.
+
+ This field is a member of `oneof`_ ``isolation``.
+ standard_isolation (google.cloud.bigtable_admin_v2.types.AppProfile.StandardIsolation):
+ The standard options used for isolating this
+ app profile's traffic from other use cases.
+
+ This field is a member of `oneof`_ ``isolation``.
"""
+ class Priority(proto.Enum):
+ r"""Possible priorities for an app profile. Note that higher
+ priority writes can sometimes queue behind lower priority writes
+ to the same tablet, as writes must be strictly sequenced in the
+ durability log.
+
+ Values:
+ PRIORITY_UNSPECIFIED (0):
+ Default value. Mapped to PRIORITY_HIGH (the legacy behavior)
+ on creation.
+ PRIORITY_LOW (1):
+ No description available.
+ PRIORITY_MEDIUM (2):
+ No description available.
+ PRIORITY_HIGH (3):
+ No description available.
+ """
+ PRIORITY_UNSPECIFIED = 0
+ PRIORITY_LOW = 1
+ PRIORITY_MEDIUM = 2
+ PRIORITY_HIGH = 3
+
class MultiClusterRoutingUseAny(proto.Message):
r"""Read/write requests are routed to the nearest cluster in the
instance, and will fail over to the nearest cluster that is
@@ -466,6 +501,22 @@ class SingleClusterRouting(proto.Message):
number=2,
)
+ class StandardIsolation(proto.Message):
+ r"""Standard options for isolating this app profile's traffic
+ from other use cases.
+
+ Attributes:
+ priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority):
+ The priority of requests sent using this app
+ profile.
+ """
+
+ priority: "AppProfile.Priority" = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum="AppProfile.Priority",
+ )
+
name: str = proto.Field(
proto.STRING,
number=1,
@@ -490,6 +541,18 @@ class SingleClusterRouting(proto.Message):
oneof="routing_policy",
message=SingleClusterRouting,
)
+ priority: Priority = proto.Field(
+ proto.ENUM,
+ number=7,
+ oneof="isolation",
+ enum=Priority,
+ )
+ standard_isolation: StandardIsolation = proto.Field(
+ proto.MESSAGE,
+ number=11,
+ oneof="isolation",
+ message=StandardIsolation,
+ )
class HotTablet(proto.Message):
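
To make the new isolation surface concrete, a hedged sketch of building an
``AppProfile`` message that combines single-cluster routing with a
standard-isolation priority; resource and cluster names are placeholders::

    from google.cloud.bigtable_admin_v2.types import AppProfile

    app_profile = AppProfile(
        name="projects/my-project/instances/my-instance/appProfiles/low-pri",
        single_cluster_routing=AppProfile.SingleClusterRouting(
            cluster_id="my-cluster",
        ),
        standard_isolation=AppProfile.StandardIsolation(
            priority=AppProfile.Priority.PRIORITY_LOW,
        ),
    )

Note that ``priority`` and ``standard_isolation`` share the ``isolation``
oneof, so setting one clears the other; per the field docs above, the
top-level ``priority`` is deprecated in favor of ``standard_isolation.priority``.
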
diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py
index 16d136e16..57bd1b00f 100644
--- a/google/cloud/bigtable_admin_v2/types/table.py
+++ b/google/cloud/bigtable_admin_v2/types/table.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -124,7 +124,8 @@ class Table(proto.Message):
``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL``
column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]):
The column families configured for this table, mapped by
- column family ID. Views: ``SCHEMA_VIEW``, ``FULL``
+ column family ID. Views: ``SCHEMA_VIEW``, ``STATS_VIEW``,
+ ``FULL``
granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity):
Immutable. The granularity (i.e. ``MILLIS``) at which
timestamps are stored in this table. Timestamps not matching
@@ -141,14 +142,16 @@ class Table(proto.Message):
this table. Otherwise, the change stream is
disabled and the change stream is not retained.
deletion_protection (bool):
- Set to true to make the table protected
- against data loss. i.e. deleting the following
- resources through Admin APIs are prohibited: -
- The table.
- - The column families in the table.
- - The instance containing the table.
- Note one can still delete the data stored in the
- table through Data APIs.
+ Set to true to make the table protected against data loss.
+ i.e. deleting the following resources through Admin APIs is
+ prohibited:
+
+ - The table.
+ - The column families in the table.
+ - The instance containing the table.
+
+ Note that one can still delete the data stored in the table
+ through Data APIs.
"""
class TimestampGranularity(proto.Enum):
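
For context, a hedged sketch of toggling this flag on an existing table via
the generated admin client, using the standard GAPIC update pattern with a
field mask; names are placeholders::

    from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
    from google.cloud.bigtable_admin_v2.types import Table
    from google.protobuf import field_mask_pb2

    client = BigtableTableAdminClient()
    table = Table(
        name="projects/my-project/instances/my-instance/tables/my-table",
        deletion_protection=True,
    )
    operation = client.update_table(
        table=table,
        update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
    )
    operation.result()  # delete_table on this table now fails until the flag is unset
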
@@ -308,6 +311,7 @@ class ColumnFamily(proto.Message):
gc_rule (google.cloud.bigtable_admin_v2.types.GcRule):
Garbage collection rule specified as a
protobuf. Must serialize to at most 500 bytes.
+
NOTE: Garbage collection executes
opportunistically in the background, and so it's
possible for reads to return a cell even if it
@@ -478,6 +482,7 @@ class Snapshot(proto.Message):
r"""A snapshot of a table at a particular time. A snapshot can be
used as a checkpoint for data restoration or a data source for a
new table.
+
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to most Cloud
Bigtable customers. This feature might be changed in
@@ -486,8 +491,7 @@ class Snapshot(proto.Message):
Attributes:
name (str):
- Output only. The unique name of the snapshot. Values are of
- the form
+ The unique name of the snapshot. Values are of the form
``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``.
source_table (google.cloud.bigtable_admin_v2.types.Table):
Output only. The source table at the time the
@@ -502,16 +506,15 @@ class Snapshot(proto.Message):
Output only. The time when the snapshot is
created.
delete_time (google.protobuf.timestamp_pb2.Timestamp):
- Output only. The time when the snapshot will
- be deleted. The maximum amount of time a
- snapshot can stay active is 365 days. If 'ttl'
- is not specified, the default maximum of 365
- days will be used.
+ The time when the snapshot will be deleted.
+ The maximum amount of time a snapshot can stay
+ active is 365 days. If 'ttl' is not specified,
+ the default maximum of 365 days will be used.
state (google.cloud.bigtable_admin_v2.types.Snapshot.State):
Output only. The current state of the
snapshot.
description (str):
- Output only. Description of the snapshot.
+ Description of the snapshot.
"""
class State(proto.Enum):
@@ -587,10 +590,16 @@ class Backup(proto.Message):
backup was created. This needs to be in the same instance as
the backup. Values are of the form
``projects/{project}/instances/{instance}/tables/{source_table}``.
+ source_backup (str):
+ Output only. Name of the backup from which
+ this backup was copied. If a backup is not
+ created by copying a backup, this field will be
+ empty. Values are of the form:
+ projects/{project}/instances/{instance}/backups/{backup}.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
Required. The expiration time of the backup, with
microseconds granularity that must be at least 6 hours and
- at most 30 days from the time the request is received. Once
+ at most 90 days from the time the request is received. Once
the ``expire_time`` has passed, Cloud Bigtable will delete
the backup and free the resources used by the backup.
start_time (google.protobuf.timestamp_pb2.Timestamp):
@@ -636,6 +645,10 @@ class State(proto.Enum):
proto.STRING,
number=2,
)
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=10,
+ )
expire_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
@@ -684,6 +697,12 @@ class BackupInfo(proto.Message):
source_table (str):
Output only. Name of the table the backup was
created from.
+ source_backup (str):
+ Output only. Name of the backup from which
+ this backup was copied. If a backup is not
+ created by copying a backup, this field will be
+ empty. Values are of the form:
+ projects/{project}/instances/{instance}/backups/{backup}.
"""
backup: str = proto.Field(
@@ -704,6 +723,10 @@ class BackupInfo(proto.Message):
proto.STRING,
number=4,
)
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=10,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py
index ee3bd8c0c..80bd4ec09 100644
--- a/google/cloud/bigtable_v2/__init__.py
+++ b/google/cloud/bigtable_v2/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py
index 0f1a446f3..e546bae05 100644
--- a/google/cloud/bigtable_v2/gapic_version.py
+++ b/google/cloud/bigtable_v2/gapic_version.py
@@ -13,4 +13,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = "2.19.0" # {x-release-please-version}
+__version__ = "2.21.0" # {x-release-please-version}
diff --git a/google/cloud/bigtable_v2/services/__init__.py b/google/cloud/bigtable_v2/services/__init__.py
index e8e1c3845..89a37dc92 100644
--- a/google/cloud/bigtable_v2/services/__init__.py
+++ b/google/cloud/bigtable_v2/services/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/__init__.py b/google/cloud/bigtable_v2/services/bigtable/__init__.py
index cfce7b6b8..f10a68e5b 100644
--- a/google/cloud/bigtable_v2/services/bigtable/__init__.py
+++ b/google/cloud/bigtable_v2/services/bigtable/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py
index d325564c0..038180974 100644
--- a/google/cloud/bigtable_v2/services/bigtable/async_client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -36,14 +36,14 @@
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
-from google.api_core import retry as retries
+from google.api_core import retry_async as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
- OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
- OptionalRetry = Union[retries.Retry, object, None] # type: ignore
+ OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore
from google.cloud.bigtable_v2.types import bigtable
from google.cloud.bigtable_v2.types import data
@@ -251,7 +251,7 @@ def read_rows(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -344,7 +344,7 @@ def sample_row_keys(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -454,7 +454,7 @@ async def mutate_row(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -492,9 +492,21 @@ async def mutate_row(
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = self._client._transport._wrapped_methods[
- self._client._transport.mutate_row
- ]
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.mutate_row,
+ default_retry=retries.AsyncRetry(
+ initial=0.01,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
# Certain fields should be provided within the metadata header;
# add these here.
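
Since the async surface now types ``retry`` as ``AsyncRetry``, callers supply
retries built from ``google.api_core.retry_async``; a short sketch mirroring
the defaults wired in above (the backoff numbers are illustrative)::

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry_async

    custom_retry = retry_async.AsyncRetry(
        initial=0.01,
        maximum=60.0,
        multiplier=2,
        predicate=retry_async.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=60.0,
    )
    # e.g.: await client.mutate_row(..., retry=custom_retry)
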
@@ -565,7 +577,7 @@ def mutate_rows(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -703,7 +715,7 @@ async def check_and_mutate_row(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -812,7 +824,7 @@ async def ping_and_warm(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -929,7 +941,7 @@ async def read_modify_write_row(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1034,7 +1046,7 @@ def generate_initial_change_stream_partitions(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1136,7 +1148,7 @@ def read_change_stream(
This corresponds to the ``app_profile_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
@@ -1196,7 +1208,7 @@ def read_change_stream(
# Done; return the response.
return response
- async def __aenter__(self):
+ async def __aenter__(self) -> "BigtableAsyncClient":
return self
async def __aexit__(self, exc_type, exc, tb):
diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py
index 1c2e7b822..54ba6af43 100644
--- a/google/cloud/bigtable_v2/services/bigtable/client.py
+++ b/google/cloud/bigtable_v2/services/bigtable/client.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py
index e8796bb8c..6a9eb0e58 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
index 5b4580c18..b580bbca7 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/base.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
index b9e073e8a..8ba04e761 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
index 3450d4969..1d0a2bc4c 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
index 4343fbb90..31d230f94 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py
index 9f15efaf5..f266becb9 100644
--- a/google/cloud/bigtable_v2/types/__init__.py
+++ b/google/cloud/bigtable_v2/types/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py
index 13f6ac0db..57f806408 100644
--- a/google/cloud/bigtable_v2/types/bigtable.py
+++ b/google/cloud/bigtable_v2/types/bigtable.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -80,6 +80,21 @@ class ReadRowsRequest(proto.Message):
request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView):
The view into RequestStats, as described
above.
+ reversed (bool):
+ Experimental API - Please note that this API is currently
+ experimental and can change in the future.
+
+ Return rows in lexicographical descending order of the row
+ keys. The row contents will not be affected by this flag.
+
+ Example result set:
+
+ ::
+
+ [
+ {key: "k2", "f:col1": "v1", "f:col2": "v1"},
+ {key: "k1", "f:col1": "v2", "f:col2": "v2"}
+ ]
"""
class RequestStatsView(proto.Enum):
@@ -131,6 +146,10 @@ class RequestStatsView(proto.Enum):
number=6,
enum=RequestStatsView,
)
+ reversed: bool = proto.Field(
+ proto.BOOL,
+ number=7,
+ )
class ReadRowsResponse(proto.Message):
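
A hedged sketch of a reversed scan using this new flag with the generated
``BigtableClient``; the table path is a placeholder::

    from google.cloud.bigtable_v2 import BigtableClient
    from google.cloud.bigtable_v2.types import ReadRowsRequest

    client = BigtableClient()
    request = ReadRowsRequest(
        table_name="projects/my-project/instances/my-instance/tables/my-table",
        rows_limit=10,
        reversed=True,  # rows stream back in descending key order
    )
    for response in client.read_rows(request=request):
        for chunk in response.chunks:
            print(chunk.row_key)
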
diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py
index 515e167df..e37644a76 100644
--- a/google/cloud/bigtable_v2/types/data.py
+++ b/google/cloud/bigtable_v2/types/data.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -697,6 +697,7 @@ class Condition(proto.Message):
r"""A RowFilter which evaluates one of two possible RowFilters,
depending on whether or not a predicate RowFilter outputs any
cells from the input row.
+
IMPORTANT NOTE: The predicate filter does not execute atomically
with the true and false filters, which may lead to inconsistent
or unexpected results. Additionally, Condition filters have poor
diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py
index 1b5f76e24..92ac5023d 100644
--- a/google/cloud/bigtable_v2/types/feature_flags.py
+++ b/google/cloud/bigtable_v2/types/feature_flags.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,26 +29,54 @@
class FeatureFlags(proto.Message):
- r"""Feature flags supported by a client. This is intended to be sent as
- part of request metadata to assure the server that certain behaviors
- are safe to enable. This proto is meant to be serialized and
- websafe-base64 encoded under the ``bigtable-features`` metadata key.
- The value will remain constant for the lifetime of a client and due
- to HTTP2's HPACK compression, the request overhead will be tiny.
- This is an internal implementation detail and should not be used by
- endusers directly.
+ r"""Feature flags supported or enabled by a client. This is intended to
+ be sent as part of request metadata to assure the server that
+ certain behaviors are safe to enable. This proto is meant to be
+ serialized and websafe-base64 encoded under the
+ ``bigtable-features`` metadata key. The value will remain constant
+ for the lifetime of a client and due to HTTP2's HPACK compression,
+ the request overhead will be tiny. This is an internal
+ implementation detail and should not be used by end users directly.
Attributes:
+ reverse_scans (bool):
+ Notify the server that the client supports
+ reverse scans. The server will reject
+ ReadRowsRequests with the reverse bit set when
+ this is absent.
mutate_rows_rate_limit (bool):
Notify the server that the client enables
batch write flow control by requesting
- RateLimitInfo from MutateRowsResponse.
+ RateLimitInfo from MutateRowsResponse. Due to
+ technical reasons, this disables partial
+ retries.
+ mutate_rows_rate_limit2 (bool):
+ Notify the server that the client enables
+ batch write flow control by requesting
+ RateLimitInfo from MutateRowsResponse. With
+ partial retries enabled.
+ last_scanned_row_responses (bool):
+ Notify the server that the client supports the
+ last_scanned_row field in ReadRowsResponse for long-running
+ scans.
"""
+ reverse_scans: bool = proto.Field(
+ proto.BOOL,
+ number=1,
+ )
mutate_rows_rate_limit: bool = proto.Field(
proto.BOOL,
number=3,
)
+ mutate_rows_rate_limit2: bool = proto.Field(
+ proto.BOOL,
+ number=5,
+ )
+ last_scanned_row_responses: bool = proto.Field(
+ proto.BOOL,
+ number=4,
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
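
To make the encoding described above concrete, a hedged sketch of serializing
the flags websafe-base64 under the ``bigtable-features`` metadata key (the key
name comes from the docstring; attaching it per-RPC is shown as a comment)::

    import base64

    from google.cloud.bigtable_v2.types import FeatureFlags

    flags = FeatureFlags(reverse_scans=True, last_scanned_row_responses=True)
    encoded = base64.urlsafe_b64encode(FeatureFlags.serialize(flags)).decode("ascii")
    metadata = [("bigtable-features", encoded)]
    # e.g.: client.read_rows(request=request, metadata=metadata)
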
diff --git a/google/cloud/bigtable_v2/types/request_stats.py b/google/cloud/bigtable_v2/types/request_stats.py
index d72ba8694..61cce9491 100644
--- a/google/cloud/bigtable_v2/types/request_stats.py
+++ b/google/cloud/bigtable_v2/types/request_stats.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -86,6 +86,7 @@ class RequestLatencyStats(proto.Message):
response. For more context on the component that
is measuring this latency, see:
https://cloud.google.com/bigtable/docs/overview
+
Note: This value may be slightly shorter than
the value reported into aggregate latency
metrics in Monitoring for this request
diff --git a/google/cloud/bigtable_v2/types/response_params.py b/google/cloud/bigtable_v2/types/response_params.py
index 2532e64e2..98e3a67db 100644
--- a/google/cloud/bigtable_v2/types/response_params.py
+++ b/google/cloud/bigtable_v2/types/response_params.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/mypy.ini b/mypy.ini
index f12ed46fc..31cc24223 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,5 +1,5 @@
[mypy]
-python_version = 3.6
+python_version = 3.8
namespace_packages = True
exclude = tests/unit/gapic/
diff --git a/noxfile.py b/noxfile.py
index e1d2f4acc..db7f2f18e 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2018 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,21 +17,24 @@
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
+
import os
import pathlib
import re
import shutil
+from typing import Dict, List
import warnings
import nox
-BLACK_VERSION = "black==22.3.0"
-ISORT_VERSION = "isort==5.10.1"
+FLAKE8_VERSION = "flake8==6.1.0"
+BLACK_VERSION = "black[jupyter]==23.7.0"
+ISORT_VERSION = "isort==5.11.0"
LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
-UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
UNIT_TEST_STANDARD_DEPENDENCIES = [
"mock",
"asyncmock",
@@ -39,29 +42,24 @@
"pytest-cov",
"pytest-asyncio",
]
-UNIT_TEST_EXTERNAL_DEPENDENCIES = [
- # "git+https://github.com/googleapis/python-api-core.git@retry_generators"
-]
-UNIT_TEST_LOCAL_DEPENDENCIES = []
-UNIT_TEST_DEPENDENCIES = []
-UNIT_TEST_EXTRAS = []
-UNIT_TEST_EXTRAS_BY_PYTHON = {}
-
-SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
-SYSTEM_TEST_STANDARD_DEPENDENCIES = [
+UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = []
+UNIT_TEST_DEPENDENCIES: List[str] = []
+UNIT_TEST_EXTRAS: List[str] = []
+UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
+
+SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8"]
+SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [
"mock",
"pytest",
"pytest-asyncio",
"google-cloud-testutils",
]
-SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [
- # "git+https://github.com/googleapis/python-api-core.git@retry_generators"
-]
-SYSTEM_TEST_LOCAL_DEPENDENCIES = []
-UNIT_TEST_DEPENDENCIES = []
-SYSTEM_TEST_DEPENDENCIES = []
-SYSTEM_TEST_EXTRAS = []
-SYSTEM_TEST_EXTRAS_BY_PYTHON = {}
+SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_DEPENDENCIES: List[str] = []
+SYSTEM_TEST_EXTRAS: List[str] = []
+SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {}
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
@@ -76,6 +74,7 @@
"lint_setup_py",
"blacken",
"docs",
+ "format",
]
# Error if a python version is missing
@@ -89,7 +88,7 @@ def lint(session):
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
- session.install("flake8", BLACK_VERSION)
+ session.install(FLAKE8_VERSION, BLACK_VERSION)
session.run(
"black",
"--check",
@@ -136,17 +135,8 @@ def mypy(session):
"mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests"
)
session.install("google-cloud-testutils")
- session.run(
- "mypy",
- "google/cloud/bigtable/data",
- "--check-untyped-defs",
- "--warn-unreachable",
- "--disallow-any-generics",
- "--exclude",
- "tests/system/v2_client",
- "--exclude",
- "tests/unit/v2_client",
- )
+ # TODO: also verify types on tests, all of google package
+ session.run("mypy", "-p", "google", "-p", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
@@ -221,7 +211,6 @@ def unit(session):
def install_systemtest_dependencies(session, *constraints):
-
# Use pre-release gRPC for system tests.
# Exclude version 1.52.0rc1 which has a known issue.
# See https://github.com/grpc/grpc/issues/32163
@@ -381,7 +370,7 @@ def docs(session):
)
-@nox.session(python="3.9")
+@nox.session(python="3.10")
def docfx(session):
"""Build the docfx yaml files for this library."""
@@ -460,7 +449,8 @@ def prerelease_deps(session):
# Exclude version 1.52.0rc1 which has a known issue. See https://github.com/grpc/grpc/issues/32163
"grpcio!=1.52.0rc1",
"grpcio-status",
- "google-api-core==2.12.0.dev1", # TODO: remove this once streaming retries is merged
+ "google-api-core",
+ "google-auth",
"proto-plus",
"google-cloud-testutils",
# dependencies of google-cloud-testutils"
@@ -473,7 +463,6 @@ def prerelease_deps(session):
# Remaining dependencies
other_deps = [
"requests",
- "google-auth",
]
session.install(*other_deps)
@@ -482,6 +471,7 @@ def prerelease_deps(session):
"python", "-c", "import google.protobuf; print(google.protobuf.__version__)"
)
session.run("python", "-c", "import grpc; print(grpc.__version__)")
+ session.run("python", "-c", "import google.auth; print(google.auth.__version__)")
session.run("py.test", "tests/unit")
diff --git a/owlbot.py b/owlbot.py
index b542b3246..626b705af 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -168,19 +168,8 @@ def mypy(session):
session.install("-e", ".")
session.install("mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests")
session.install("google-cloud-testutils")
- session.run(
- "mypy",
- "google/cloud/bigtable",
- "--check-untyped-defs",
- "--warn-unreachable",
- "--disallow-any-generics",
- "--exclude",
- "google/cloud/bigtable/deprecated",
- "--exclude",
- "tests/system/v2_client",
- "--exclude",
- "tests/unit/v2_client",
- )
+ # TODO: also verify types on tests, all of google package
+ session.run("mypy", "-p", "google", "-p", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
diff --git a/samples/beam/noxfile.py b/samples/beam/noxfile.py
index 3d4395024..80ffdb178 100644
--- a/samples/beam/noxfile.py
+++ b/samples/beam/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/beam/requirements-test.txt b/samples/beam/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/beam/requirements-test.txt
+++ b/samples/beam/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt
index 8be9b98e0..9b95d0b52 100644
--- a/samples/beam/requirements.txt
+++ b/samples/beam/requirements.txt
@@ -1,3 +1,3 @@
apache-beam==2.46.0
google-cloud-bigtable==2.17.0
-google-cloud-core==2.3.2
+google-cloud-core==2.3.3
diff --git a/samples/hello/noxfile.py b/samples/hello/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/hello/noxfile.py
+++ b/samples/hello/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/hello/requirements-test.txt b/samples/hello/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/hello/requirements-test.txt
+++ b/samples/hello/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/hello/requirements.txt b/samples/hello/requirements.txt
index 199541ffe..a76d144e6 100644
--- a/samples/hello/requirements.txt
+++ b/samples/hello/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.17.0
-google-cloud-core==2.3.2
+google-cloud-bigtable==2.20.0
+google-cloud-core==2.3.3
diff --git a/samples/hello_happybase/noxfile.py b/samples/hello_happybase/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/hello_happybase/noxfile.py
+++ b/samples/hello_happybase/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/hello_happybase/requirements-test.txt b/samples/hello_happybase/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/hello_happybase/requirements-test.txt
+++ b/samples/hello_happybase/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/hello_happybase/requirements.txt b/samples/hello_happybase/requirements.txt
index a144f03e1..d3368cd0f 100644
--- a/samples/hello_happybase/requirements.txt
+++ b/samples/hello_happybase/requirements.txt
@@ -1 +1,2 @@
google-cloud-happybase==0.33.0
+six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128
diff --git a/samples/instanceadmin/noxfile.py b/samples/instanceadmin/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/instanceadmin/noxfile.py
+++ b/samples/instanceadmin/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/instanceadmin/requirements-test.txt b/samples/instanceadmin/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/instanceadmin/requirements-test.txt
+++ b/samples/instanceadmin/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/instanceadmin/requirements.txt b/samples/instanceadmin/requirements.txt
index 04e476254..bba9ed8cf 100644
--- a/samples/instanceadmin/requirements.txt
+++ b/samples/instanceadmin/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.17.0
+google-cloud-bigtable==2.20.0
backoff==2.2.1
diff --git a/samples/metricscaler/noxfile.py b/samples/metricscaler/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/metricscaler/noxfile.py
+++ b/samples/metricscaler/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/metricscaler/requirements-test.txt b/samples/metricscaler/requirements-test.txt
index 761227068..d8ae088dd 100644
--- a/samples/metricscaler/requirements-test.txt
+++ b/samples/metricscaler/requirements-test.txt
@@ -1,3 +1,3 @@
-pytest==7.3.1
-mock==5.0.2
+pytest==7.4.0
+mock==5.1.0
google-cloud-testutils
diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt
index 02e08b4c8..c0fce2294 100644
--- a/samples/metricscaler/requirements.txt
+++ b/samples/metricscaler/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.17.0
-google-cloud-monitoring==2.14.2
+google-cloud-bigtable==2.20.0
+google-cloud-monitoring==2.15.1
diff --git a/samples/quickstart/noxfile.py b/samples/quickstart/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/quickstart/noxfile.py
+++ b/samples/quickstart/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/quickstart/requirements-test.txt b/samples/quickstart/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/quickstart/requirements-test.txt
+++ b/samples/quickstart/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/quickstart/requirements.txt b/samples/quickstart/requirements.txt
index 909f8c365..83e37754e 100644
--- a/samples/quickstart/requirements.txt
+++ b/samples/quickstart/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.17.0
+google-cloud-bigtable==2.20.0
diff --git a/samples/quickstart_happybase/noxfile.py b/samples/quickstart_happybase/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/quickstart_happybase/noxfile.py
+++ b/samples/quickstart_happybase/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/quickstart_happybase/requirements-test.txt b/samples/quickstart_happybase/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/quickstart_happybase/requirements-test.txt
+++ b/samples/quickstart_happybase/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/quickstart_happybase/requirements.txt b/samples/quickstart_happybase/requirements.txt
index a144f03e1..d3368cd0f 100644
--- a/samples/quickstart_happybase/requirements.txt
+++ b/samples/quickstart_happybase/requirements.txt
@@ -1 +1,2 @@
google-cloud-happybase==0.33.0
+six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128
diff --git a/samples/snippets/deletes/noxfile.py b/samples/snippets/deletes/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/snippets/deletes/noxfile.py
+++ b/samples/snippets/deletes/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/snippets/deletes/requirements-test.txt b/samples/snippets/deletes/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/snippets/deletes/requirements-test.txt
+++ b/samples/snippets/deletes/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/snippets/deletes/requirements.txt b/samples/snippets/deletes/requirements.txt
index 200665631..85b4e786f 100644
--- a/samples/snippets/deletes/requirements.txt
+++ b/samples/snippets/deletes/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.17.0
+google-cloud-bigtable==2.20.0
snapshottest==0.6.0
\ No newline at end of file
diff --git a/samples/snippets/filters/noxfile.py b/samples/snippets/filters/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/snippets/filters/noxfile.py
+++ b/samples/snippets/filters/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/snippets/filters/requirements-test.txt b/samples/snippets/filters/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/snippets/filters/requirements-test.txt
+++ b/samples/snippets/filters/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/snippets/filters/requirements.txt b/samples/snippets/filters/requirements.txt
index 200665631..85b4e786f 100644
--- a/samples/snippets/filters/requirements.txt
+++ b/samples/snippets/filters/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.17.0
+google-cloud-bigtable==2.20.0
snapshottest==0.6.0
\ No newline at end of file
diff --git a/samples/snippets/reads/noxfile.py b/samples/snippets/reads/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/snippets/reads/noxfile.py
+++ b/samples/snippets/reads/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/snippets/reads/requirements-test.txt b/samples/snippets/reads/requirements-test.txt
index c4d04a08d..70613be0c 100644
--- a/samples/snippets/reads/requirements-test.txt
+++ b/samples/snippets/reads/requirements-test.txt
@@ -1 +1 @@
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/snippets/reads/requirements.txt b/samples/snippets/reads/requirements.txt
index 200665631..85b4e786f 100644
--- a/samples/snippets/reads/requirements.txt
+++ b/samples/snippets/reads/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-bigtable==2.17.0
+google-cloud-bigtable==2.20.0
snapshottest==0.6.0
\ No newline at end of file
diff --git a/samples/snippets/writes/noxfile.py b/samples/snippets/writes/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/snippets/writes/noxfile.py
+++ b/samples/snippets/writes/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/snippets/writes/requirements-test.txt b/samples/snippets/writes/requirements-test.txt
index 96aa71dab..cbd0a47de 100644
--- a/samples/snippets/writes/requirements-test.txt
+++ b/samples/snippets/writes/requirements-test.txt
@@ -1,2 +1,2 @@
backoff==2.2.1
-pytest==7.3.1
+pytest==7.4.0
diff --git a/samples/snippets/writes/requirements.txt b/samples/snippets/writes/requirements.txt
index 32cead029..90fa5577c 100644
--- a/samples/snippets/writes/requirements.txt
+++ b/samples/snippets/writes/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.17.0
\ No newline at end of file
+google-cloud-bigtable==2.20.0
\ No newline at end of file
diff --git a/samples/tableadmin/noxfile.py b/samples/tableadmin/noxfile.py
index 7c8a63994..483b55901 100644
--- a/samples/tableadmin/noxfile.py
+++ b/samples/tableadmin/noxfile.py
@@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt
index ca1f33bd3..b4ead9993 100644
--- a/samples/tableadmin/requirements-test.txt
+++ b/samples/tableadmin/requirements-test.txt
@@ -1,2 +1,2 @@
-pytest==7.3.1
+pytest==7.4.0
google-cloud-testutils==1.3.3
diff --git a/samples/tableadmin/requirements.txt b/samples/tableadmin/requirements.txt
index 909f8c365..83e37754e 100644
--- a/samples/tableadmin/requirements.txt
+++ b/samples/tableadmin/requirements.txt
@@ -1 +1 @@
-google-cloud-bigtable==2.17.0
+google-cloud-bigtable==2.20.0
diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh
index 21f6d2a26..0018b421d 100755
--- a/scripts/decrypt-secrets.sh
+++ b/scripts/decrypt-secrets.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-# Copyright 2015 Google Inc. All rights reserved.
+# Copyright 2023 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_bigtable_admin_v2_keywords.py
index 17be56f2f..6882feaf6 100644
--- a/scripts/fixup_bigtable_admin_v2_keywords.py
+++ b/scripts/fixup_bigtable_admin_v2_keywords.py
@@ -1,6 +1,6 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -40,6 +40,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'check_consistency': ('name', 'consistency_token', ),
+ 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ),
'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ),
'create_backup': ('parent', 'backup_id', 'backup', ),
'create_cluster': ('parent', 'cluster_id', 'cluster', ),
diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py
index 11ffed53f..8d32e5b70 100644
--- a/scripts/fixup_bigtable_v2_keywords.py
+++ b/scripts/fixup_bigtable_v2_keywords.py
@@ -1,6 +1,6 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -46,7 +46,7 @@ class bigtableCallTransformer(cst.CSTTransformer):
'ping_and_warm': ('name', 'app_profile_id', ),
'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ),
'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ),
- 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', ),
+ 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ),
'sample_row_keys': ('table_name', 'app_profile_id', ),
}
diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py
index 91b59676b..1acc11983 100644
--- a/scripts/readme-gen/readme_gen.py
+++ b/scripts/readme-gen/readme_gen.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-# Copyright 2016 Google Inc
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,17 +33,17 @@
autoescape=True,
)
-README_TMPL = jinja_env.get_template('README.tmpl.rst')
+README_TMPL = jinja_env.get_template("README.tmpl.rst")
def get_help(file):
- return subprocess.check_output(['python', file, '--help']).decode()
+ return subprocess.check_output(["python", file, "--help"]).decode()
def main():
parser = argparse.ArgumentParser()
- parser.add_argument('source')
- parser.add_argument('--destination', default='README.rst')
+ parser.add_argument("source")
+ parser.add_argument("--destination", default="README.rst")
args = parser.parse_args()
@@ -51,9 +51,9 @@ def main():
root = os.path.dirname(source)
destination = os.path.join(root, args.destination)
- jinja_env.globals['get_help'] = get_help
+ jinja_env.globals["get_help"] = get_help
- with io.open(source, 'r') as f:
+ with io.open(source, "r") as f:
config = yaml.load(f)
# This allows get_help to execute in the right directory.
@@ -61,9 +61,9 @@ def main():
output = README_TMPL.render(config)
- with io.open(destination, 'w') as f:
+ with io.open(destination, "w") as f:
f.write(output)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/setup.cfg b/setup.cfg
index c3a2b39f6..052350089 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2020 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/setup.py b/setup.py
index e5efc9937..e9bce0960 100644
--- a/setup.py
+++ b/setup.py
@@ -37,8 +37,8 @@
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
- "google-api-core[grpc] == 2.12.0.dev1", # TODO: change to >= after streaming retries is merged
- "google-cloud-core >= 1.4.1, <3.0.0dev",
+ "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*",
+ "google-cloud-core >= 1.4.4, <3.0.0dev",
"grpc-google-iam-v1 >= 0.12.4, <1.0.0dev",
"proto-plus >= 1.22.0, <2.0.0dev",
"proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'",
@@ -59,16 +59,10 @@
# benchmarks, etc.
packages = [
package
- for package in setuptools.PEP420PackageFinder.find()
+ for package in setuptools.find_namespace_packages()
if package.startswith("google")
]
-# Determine which namespaces are needed.
-namespaces = ["google"]
-if "google.cloud" in packages:
- namespaces.append("google.cloud")
-
-
setuptools.setup(
name=name,
version=version,
@@ -88,12 +82,12 @@
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
- namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
scripts=[
diff --git a/testing/constraints-3.12.txt b/testing/constraints-3.12.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt
index 9f23121d1..0718fa655 100644
--- a/testing/constraints-3.7.txt
+++ b/testing/constraints-3.7.txt
@@ -5,8 +5,8 @@
#
# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
# Then this file should have foo==1.14.0
-google-api-core==2.12.0.dev1
-google-cloud-core==2.3.2
+google-api-core==1.34.0
+google-cloud-core==1.4.4
grpc-google-iam-v1==0.12.4
proto-plus==1.22.0
libcst==0.2.5
diff --git a/tests/__init__.py b/tests/__init__.py
index e8e1c3845..89a37dc92 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/system/conftest.py b/tests/system/conftest.py
index b8862ea4b..a2442621b 100644
--- a/tests/system/conftest.py
+++ b/tests/system/conftest.py
@@ -20,6 +20,193 @@
script_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(script_path)
-pytest_plugins = [
- "data.setup_fixtures",
-]
+from google.cloud.bigtable.client import Client
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+
+from . import _helpers
+
+
+@pytest.fixture(scope="session")
+def in_emulator():
+ return os.getenv(BIGTABLE_EMULATOR) is not None
+
+
+@pytest.fixture(scope="session")
+def kms_key_name():
+ return os.getenv("KMS_KEY_NAME")
+
+
+@pytest.fixture(scope="session")
+def with_kms_key_name(kms_key_name):
+ if kms_key_name is None:
+ pytest.skip("Test requires KMS_KEY_NAME environment variable")
+ return kms_key_name
+
+
+@pytest.fixture(scope="session")
+def skip_on_emulator(in_emulator):
+ if in_emulator:
+ pytest.skip("Emulator does not support this feature")
+
+
+@pytest.fixture(scope="session")
+def unique_suffix():
+ return unique_resource_id("-")
+
+
+@pytest.fixture(scope="session")
+def location_id():
+ return "us-central1-c"
+
+
+@pytest.fixture(scope="session")
+def serve_nodes():
+ return 1
+
+
+@pytest.fixture(scope="session")
+def label_key():
+ return "python-system"
+
+
+@pytest.fixture(scope="session")
+def instance_labels(label_key):
+ return {label_key: _helpers.label_stamp()}
+
+
+@pytest.fixture(scope="session")
+def admin_client():
+ return Client(admin=True)
+
+
+@pytest.fixture(scope="session")
+def service_account(admin_client):
+ from google.oauth2.service_account import Credentials
+
+ if not isinstance(admin_client._credentials, Credentials):
+ pytest.skip("These tests require a service account credential")
+ return admin_client._credentials
+
+
+@pytest.fixture(scope="session")
+def admin_instance_id(unique_suffix):
+ return f"g-c-p{unique_suffix}"
+
+
+@pytest.fixture(scope="session")
+def admin_cluster_id(admin_instance_id):
+ return f"{admin_instance_id}-cluster"
+
+
+@pytest.fixture(scope="session")
+def admin_instance(admin_client, admin_instance_id, instance_labels):
+ return admin_client.instance(admin_instance_id, labels=instance_labels)
+
+
+@pytest.fixture(scope="session")
+def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes):
+ return admin_instance.cluster(
+ admin_cluster_id,
+ location_id=location_id,
+ serve_nodes=serve_nodes,
+ )
+
+
+@pytest.fixture(scope="session")
+def admin_cluster_with_autoscaling(
+ admin_instance,
+ admin_cluster_id,
+ location_id,
+ min_serve_nodes,
+ max_serve_nodes,
+ cpu_utilization_percent,
+):
+ return admin_instance.cluster(
+ admin_cluster_id,
+ location_id=location_id,
+ min_serve_nodes=min_serve_nodes,
+ max_serve_nodes=max_serve_nodes,
+ cpu_utilization_percent=cpu_utilization_percent,
+ )
+
+
+@pytest.fixture(scope="session")
+def admin_instance_populated(admin_instance, admin_cluster, in_emulator):
+ # Emulator does not support instance admin operations (create / delete).
+ # See: https://cloud.google.com/bigtable/docs/emulator
+ if not in_emulator:
+ operation = admin_instance.create(clusters=[admin_cluster])
+ operation.result(timeout=240)
+
+ yield admin_instance
+
+ if not in_emulator:
+ _helpers.retry_429(admin_instance.delete)()
+
+
+@pytest.fixture(scope="session")
+def data_client():
+ return Client(admin=False)
+
+
+@pytest.fixture(scope="session")
+def data_instance_id(unique_suffix):
+ return f"g-c-p-d{unique_suffix}"
+
+
+@pytest.fixture(scope="session")
+def data_cluster_id(data_instance_id):
+ return f"{data_instance_id}-cluster"
+
+
+@pytest.fixture(scope="session")
+def data_instance_populated(
+ admin_client,
+ data_instance_id,
+ instance_labels,
+ data_cluster_id,
+ location_id,
+ serve_nodes,
+ in_emulator,
+):
+ instance = admin_client.instance(data_instance_id, labels=instance_labels)
+ # Emulator does not support instance admin operations (create / delete).
+ # See: https://cloud.google.com/bigtable/docs/emulator
+ if not in_emulator:
+ cluster = instance.cluster(
+ data_cluster_id,
+ location_id=location_id,
+ serve_nodes=serve_nodes,
+ )
+ operation = instance.create(clusters=[cluster])
+ operation.result(timeout=240)
+
+ yield instance
+
+ if not in_emulator:
+ _helpers.retry_429(instance.delete)()
+
+
+@pytest.fixture(scope="function")
+def instances_to_delete():
+ instances_to_delete = []
+
+ yield instances_to_delete
+
+ for instance in instances_to_delete:
+ _helpers.retry_429(instance.delete)()
+
+
+@pytest.fixture(scope="session")
+def min_serve_nodes(in_emulator):
+ return 1
+
+
+@pytest.fixture(scope="session")
+def max_serve_nodes(in_emulator):
+ return 8
+
+
+@pytest.fixture(scope="session")
+def cpu_utilization_percent(in_emulator):
+ return 10
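
Taken together, these session-scoped fixtures keep individual system tests declarative: a test names the resources it needs and pytest handles creation, emulator gating, and teardown. A minimal sketch of a consuming test (hypothetical test body, assuming pytest collects the fixtures above from this conftest.py):

    def test_instance_exists(data_instance_populated, skip_on_emulator):
        # data_instance_populated yields a created Instance and deletes it at
        # session teardown; skip_on_emulator skips when BIGTABLE_EMULATOR is set.
        assert data_instance_populated.exists()
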
diff --git a/tests/system/v2_client/test_data_api.py b/tests/system/v2_client/test_data_api.py
index 2ca7e1504..579837e34 100644
--- a/tests/system/v2_client/test_data_api.py
+++ b/tests/system/v2_client/test_data_api.py
@@ -381,3 +381,39 @@ def test_access_with_non_admin_client(data_client, data_instance_id, data_table_
instance = data_client.instance(data_instance_id)
table = instance.table(data_table_id)
assert table.read_row("nonesuch") is None # no raise
+
+
+def test_mutations_batcher_threading(data_table, rows_to_delete):
+ """
+    Exercise the mutations batcher by sending enough mutations that timed
+    flushes and count-based flushes both occur.
+ """
+ import mock
+ import time
+ from google.cloud.bigtable.batcher import MutationsBatcher
+
+ num_sent = 20
+ all_results = []
+
+ def callback(results):
+ all_results.extend(results)
+
+ # override flow control max elements
+ with mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", 2):
+ with MutationsBatcher(
+ data_table,
+ flush_count=5,
+ flush_interval=0.07,
+ batch_completed_callback=callback,
+ ) as batcher:
+            # send mutations so that timed flushes and count-based flushes interleave
+ for i in range(num_sent):
+ row = data_table.direct_row("row{}".format(i))
+ row.set_cell(
+ COLUMN_FAMILY_ID1, COL_NAME1, "val{}".format(i).encode("utf-8")
+ )
+ rows_to_delete.append(row)
+ batcher.mutate(row)
+ time.sleep(0.01)
+ # ensure all mutations were sent
+ assert len(all_results) == num_sent
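
Outside the test harness, MutationsBatcher is used the same way minus the mock patch. A minimal usage sketch (hypothetical table and row key, assuming a connected google.cloud.bigtable Table):

    from google.cloud.bigtable.batcher import MutationsBatcher

    def on_batch(results):
        # invoked once per flushed batch with the per-row status list
        print(len(results), "mutations applied")

    with MutationsBatcher(
        table, flush_count=100, batch_completed_callback=on_batch
    ) as batcher:
        row = table.direct_row(b"row-key")
        row.set_cell("cf1", b"col", b"value")
        batcher.mutate(row)
    # leaving the context manager flushes any mutations still buffered
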
diff --git a/tests/system/v2_client/test_instance_admin.py b/tests/system/v2_client/test_instance_admin.py
index e5e311213..bd5c7e912 100644
--- a/tests/system/v2_client/test_instance_admin.py
+++ b/tests/system/v2_client/test_instance_admin.py
@@ -28,7 +28,6 @@ def _create_app_profile_helper(
allow_transactional_writes=None,
ignore_warnings=None,
):
-
app_profile = instance.app_profile(
app_profile_id=app_profile_id,
routing_policy_type=routing_policy_type,
diff --git a/tests/unit/gapic/__init__.py b/tests/unit/gapic/__init__.py
index e8e1c3845..89a37dc92 100644
--- a/tests/unit/gapic/__init__.py
+++ b/tests/unit/gapic/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/unit/gapic/bigtable_admin_v2/__init__.py b/tests/unit/gapic/bigtable_admin_v2/__init__.py
index e8e1c3845..89a37dc92 100644
--- a/tests/unit/gapic/bigtable_admin_v2/__init__.py
+++ b/tests/unit/gapic/bigtable_admin_v2/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
index 76715f1ed..ddbf0032f 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -63,7 +63,7 @@
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2
+from google.longrunning import operations_pb2 # type: ignore
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
@@ -2425,11 +2425,6 @@ def test_get_cluster(request_type, transport: str = "grpc"):
state=instance.Cluster.State.READY,
serve_nodes=1181,
default_storage_type=common.StorageType.SSD,
- cluster_config=instance.Cluster.ClusterConfig(
- cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig(
- autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600)
- )
- ),
)
response = client.get_cluster(request)
@@ -3529,9 +3524,7 @@ def test_create_app_profile(request_type, transport: str = "grpc"):
name="name_value",
etag="etag_value",
description="description_value",
- multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
- cluster_ids=["cluster_ids_value"]
- ),
+ priority=instance.AppProfile.Priority.PRIORITY_LOW,
)
response = client.create_app_profile(request)
@@ -3801,9 +3794,7 @@ def test_get_app_profile(request_type, transport: str = "grpc"):
name="name_value",
etag="etag_value",
description="description_value",
- multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
- cluster_ids=["cluster_ids_value"]
- ),
+ priority=instance.AppProfile.Priority.PRIORITY_LOW,
)
response = client.get_app_profile(request)
@@ -4456,9 +4447,11 @@ async def test_list_app_profiles_async_pages():
RuntimeError,
)
pages = []
- async for page_ in (
+    # Work around an issue in Python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
await client.list_app_profiles(request={})
- ).pages: # pragma: no branch
+ ).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
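
For context, the same async pager consumed from application code rather than through the coverage workaround above; a minimal sketch (hypothetical parent value, assuming default credentials):

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminAsyncClient

    async def print_app_profiles(parent: str) -> None:
        client = BigtableInstanceAdminAsyncClient()
        pager = await client.list_app_profiles(request={"parent": parent})
        async for profile in pager:  # pages are fetched lazily as needed
            print(profile.name)
    # e.g. asyncio.run(print_app_profiles("projects/p/instances/i"))
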
@@ -6138,9 +6131,11 @@ async def test_list_hot_tablets_async_pages():
RuntimeError,
)
pages = []
- async for page_ in (
+    # Work around an issue in Python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
await client.list_hot_tablets(request={})
- ).pages: # pragma: no branch
+ ).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -6459,8 +6454,9 @@ def test_get_instance_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Instance.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Instance.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
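
The rename reflects what the helper actually does: proto-plus messages must be unwrapped to their raw protobuf form before json_format can serialize them. The conversion in isolation, as a small sketch (assuming the bigtable_admin_v2 types are installed):

    from google.cloud.bigtable_admin_v2.types import instance
    from google.protobuf import json_format

    msg = instance.Instance(name="projects/p/instances/i", display_name="demo")
    pb = instance.Instance.pb(msg)  # unwrap the proto-plus wrapper to protobuf
    payload = json_format.MessageToJson(pb)
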
@@ -6539,8 +6535,9 @@ def test_get_instance_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Instance.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Instance.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -6663,8 +6660,9 @@ def test_get_instance_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Instance.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Instance.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -6728,8 +6726,9 @@ def test_list_instances_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -6809,10 +6808,11 @@ def test_list_instances_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListInstancesResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -6939,8 +6939,9 @@ def test_list_instances_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7007,8 +7008,9 @@ def test_update_instance_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Instance.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Instance.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7086,8 +7088,9 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance):
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Instance.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Instance.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7215,6 +7218,75 @@ def test_partial_update_instance_rest(request_type):
"create_time": {"seconds": 751, "nanos": 543},
"satisfies_pzs": True,
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[
+ "instance"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["instance"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["instance"][field])):
+ del request_init["instance"][field][i][subfield]
+ else:
+ del request_init["instance"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
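
Stripped of the GAPIC types, the pruning above reduces to a dictionary walk. A standalone sketch with hypothetical field names (no generated classes involved):

    request_init = {"cluster": {"name": "n", "limits": [{"kept": 1, "stale": 2}]}}
    runtime_nested_fields = [("limits", "kept")]  # pairs the runtime knows about

    for field, value in request_init["cluster"].items():
        sample = value[0] if isinstance(value, list) and value else value
        if isinstance(sample, dict):
            for subfield in list(sample.keys()):
                if (field, subfield) not in runtime_nested_fields:
                    for item in (value if isinstance(value, list) else [value]):
                        item.pop(subfield, None)  # drop fields unknown at runtime

    assert request_init == {"cluster": {"name": "n", "limits": [{"kept": 1}]}}
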
@@ -7395,15 +7467,6 @@ def test_partial_update_instance_rest_bad_request(
# send a request that will satisfy transcoding
request_init = {"instance": {"name": "projects/sample1/instances/sample2"}}
- request_init["instance"] = {
- "name": "projects/sample1/instances/sample2",
- "display_name": "display_name_value",
- "state": 1,
- "type_": 1,
- "labels": {},
- "create_time": {"seconds": 751, "nanos": 543},
- "satisfies_pzs": True,
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -7766,6 +7829,73 @@ def test_create_cluster_rest(request_type):
"default_storage_type": 1,
"encryption_config": {"kms_key_name": "kms_key_name_value"},
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["cluster"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["cluster"][field])):
+ del request_init["cluster"][field][i][subfield]
+ else:
+ del request_init["cluster"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -7964,26 +8094,6 @@ def test_create_cluster_rest_bad_request(
# send a request that will satisfy transcoding
request_init = {"parent": "projects/sample1/instances/sample2"}
- request_init["cluster"] = {
- "name": "name_value",
- "location": "location_value",
- "state": 1,
- "serve_nodes": 1181,
- "cluster_config": {
- "cluster_autoscaling_config": {
- "autoscaling_limits": {
- "min_serve_nodes": 1600,
- "max_serve_nodes": 1602,
- },
- "autoscaling_targets": {
- "cpu_utilization_percent": 2483,
- "storage_utilization_gib_per_node": 3404,
- },
- }
- },
- "default_storage_type": 1,
- "encryption_config": {"kms_key_name": "kms_key_name_value"},
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -8088,18 +8198,14 @@ def test_get_cluster_rest(request_type):
state=instance.Cluster.State.READY,
serve_nodes=1181,
default_storage_type=common.StorageType.SSD,
- cluster_config=instance.Cluster.ClusterConfig(
- cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig(
- autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600)
- )
- ),
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Cluster.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Cluster.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8178,8 +8284,9 @@ def test_get_cluster_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Cluster.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Cluster.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8302,8 +8409,9 @@ def test_get_cluster_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.Cluster.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.Cluster.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8368,8 +8476,9 @@ def test_list_clusters_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8449,10 +8558,9 @@ def test_list_clusters_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(
- return_value
- )
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8579,8 +8687,9 @@ def test_list_clusters_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8776,6 +8885,75 @@ def test_partial_update_cluster_rest(request_type):
"default_storage_type": 1,
"encryption_config": {"kms_key_name": "kms_key_name_value"},
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[
+ "cluster"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["cluster"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["cluster"][field])):
+ del request_init["cluster"][field][i][subfield]
+ else:
+ del request_init["cluster"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -8958,26 +9136,6 @@ def test_partial_update_cluster_rest_bad_request(
request_init = {
"cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"}
}
- request_init["cluster"] = {
- "name": "projects/sample1/instances/sample2/clusters/sample3",
- "location": "location_value",
- "state": 1,
- "serve_nodes": 1181,
- "cluster_config": {
- "cluster_autoscaling_config": {
- "autoscaling_limits": {
- "min_serve_nodes": 1600,
- "max_serve_nodes": 1602,
- },
- "autoscaling_targets": {
- "cpu_utilization_percent": 2483,
- "storage_utilization_gib_per_node": 3404,
- },
- }
- },
- "default_storage_type": 1,
- "encryption_config": {"kms_key_name": "kms_key_name_value"},
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -9335,7 +9493,78 @@ def test_create_app_profile_rest(request_type):
"cluster_id": "cluster_id_value",
"allow_transactional_writes": True,
},
+ "priority": 1,
+ "standard_isolation": {"priority": 1},
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[
+ "app_profile"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["app_profile"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["app_profile"][field])):
+ del request_init["app_profile"][field][i][subfield]
+ else:
+ del request_init["app_profile"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -9345,16 +9574,15 @@ def test_create_app_profile_rest(request_type):
name="name_value",
etag="etag_value",
description="description_value",
- multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
- cluster_ids=["cluster_ids_value"]
- ),
+ priority=instance.AppProfile.Priority.PRIORITY_LOW,
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.AppProfile.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.AppProfile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9446,8 +9674,9 @@ def test_create_app_profile_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.AppProfile.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.AppProfile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9554,18 +9783,6 @@ def test_create_app_profile_rest_bad_request(
# send a request that will satisfy transcoding
request_init = {"parent": "projects/sample1/instances/sample2"}
- request_init["app_profile"] = {
- "name": "name_value",
- "etag": "etag_value",
- "description": "description_value",
- "multi_cluster_routing_use_any": {
- "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"]
- },
- "single_cluster_routing": {
- "cluster_id": "cluster_id_value",
- "allow_transactional_writes": True,
- },
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -9605,8 +9822,9 @@ def test_create_app_profile_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.AppProfile.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.AppProfile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9670,16 +9888,15 @@ def test_get_app_profile_rest(request_type):
name="name_value",
etag="etag_value",
description="description_value",
- multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny(
- cluster_ids=["cluster_ids_value"]
- ),
+ priority=instance.AppProfile.Priority.PRIORITY_LOW,
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.AppProfile.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.AppProfile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9756,8 +9973,9 @@ def test_get_app_profile_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.AppProfile.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.AppProfile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9882,8 +10100,9 @@ def test_get_app_profile_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = instance.AppProfile.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = instance.AppProfile.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9949,10 +10168,9 @@ def test_list_app_profiles_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(
- return_value
- )
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10035,10 +10253,11 @@ def test_list_app_profiles_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10173,10 +10392,9 @@ def test_list_app_profiles_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(
- return_value
- )
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10301,7 +10519,78 @@ def test_update_app_profile_rest(request_type):
"cluster_id": "cluster_id_value",
"allow_transactional_writes": True,
},
+ "priority": 1,
+ "standard_isolation": {"priority": 1},
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[
+ "app_profile"
+ ]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["app_profile"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["app_profile"][field])):
+ del request_init["app_profile"][field][i][subfield]
+ else:
+ del request_init["app_profile"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -10496,18 +10785,6 @@ def test_update_app_profile_rest_bad_request(
"name": "projects/sample1/instances/sample2/appProfiles/sample3"
}
}
- request_init["app_profile"] = {
- "name": "projects/sample1/instances/sample2/appProfiles/sample3",
- "etag": "etag_value",
- "description": "description_value",
- "multi_cluster_routing_use_any": {
- "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"]
- },
- "single_cluster_routing": {
- "cluster_id": "cluster_id_value",
- "allow_transactional_writes": True,
- },
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -10896,8 +11173,7 @@ def test_get_iam_policy_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10974,8 +11250,7 @@ def test_get_iam_policy_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11096,8 +11371,7 @@ def test_get_iam_policy_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11163,8 +11437,7 @@ def test_set_iam_policy_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11241,8 +11514,7 @@ def test_set_iam_policy_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11371,8 +11643,7 @@ def test_set_iam_policy_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11437,8 +11708,7 @@ def test_test_iam_permissions_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11518,8 +11788,7 @@ def test_test_iam_permissions_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11651,8 +11920,7 @@ def test_test_iam_permissions_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
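
The IAM hunks drop the unwrap step entirely because policy_pb2.Policy is already a raw protobuf message rather than a proto-plus wrapper. The distinction, as a small sketch (assuming grpc-google-iam-v1 is installed):

    from google.iam.v1 import policy_pb2
    from google.protobuf import json_format

    policy = policy_pb2.Policy(version=3, etag=b"etag_blob")
    # Raw protobuf messages serialize directly; no .pb() unwrap is needed.
    payload = json_format.MessageToJson(policy)
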
@@ -11718,10 +11986,9 @@ def test_list_hot_tablets_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(
- return_value
- )
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11805,10 +12072,11 @@ def test_list_hot_tablets_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11947,10 +12215,9 @@ def test_list_hot_tablets_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(
- return_value
- )
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
index 8498e4fa5..b29dc5106 100644
--- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
+++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -62,7 +62,7 @@
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
-from google.longrunning import operations_pb2
+from google.longrunning import operations_pb2 # type: ignore
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
@@ -1691,9 +1691,11 @@ async def test_list_tables_async_pages():
RuntimeError,
)
pages = []
- async for page_ in (
+    # Work around an issue in Python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
await client.list_tables(request={})
- ).pages: # pragma: no branch
+ ).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -4457,9 +4459,11 @@ async def test_list_snapshots_async_pages():
RuntimeError,
)
pages = []
- async for page_ in (
+    # Work around an issue in Python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
await client.list_snapshots(request={})
- ).pages: # pragma: no branch
+ ).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -4956,6 +4960,7 @@ def test_get_backup(request_type, transport: str = "grpc"):
call.return_value = table.Backup(
name="name_value",
source_table="source_table_value",
+ source_backup="source_backup_value",
size_bytes=1089,
state=table.Backup.State.CREATING,
)
@@ -4970,6 +4975,7 @@ def test_get_backup(request_type, transport: str = "grpc"):
assert isinstance(response, table.Backup)
assert response.name == "name_value"
assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
assert response.size_bytes == 1089
assert response.state == table.Backup.State.CREATING
@@ -5010,6 +5016,7 @@ async def test_get_backup_async(
table.Backup(
name="name_value",
source_table="source_table_value",
+ source_backup="source_backup_value",
size_bytes=1089,
state=table.Backup.State.CREATING,
)
@@ -5025,6 +5032,7 @@ async def test_get_backup_async(
assert isinstance(response, table.Backup)
assert response.name == "name_value"
assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
assert response.size_bytes == 1089
assert response.state == table.Backup.State.CREATING
@@ -5196,6 +5204,7 @@ def test_update_backup(request_type, transport: str = "grpc"):
call.return_value = table.Backup(
name="name_value",
source_table="source_table_value",
+ source_backup="source_backup_value",
size_bytes=1089,
state=table.Backup.State.CREATING,
)
@@ -5210,6 +5219,7 @@ def test_update_backup(request_type, transport: str = "grpc"):
assert isinstance(response, table.Backup)
assert response.name == "name_value"
assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
assert response.size_bytes == 1089
assert response.state == table.Backup.State.CREATING
@@ -5251,6 +5261,7 @@ async def test_update_backup_async(
table.Backup(
name="name_value",
source_table="source_table_value",
+ source_backup="source_backup_value",
size_bytes=1089,
state=table.Backup.State.CREATING,
)
@@ -5266,6 +5277,7 @@ async def test_update_backup_async(
assert isinstance(response, table.Backup)
assert response.name == "name_value"
assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
assert response.size_bytes == 1089
assert response.state == table.Backup.State.CREATING
@@ -6058,9 +6070,11 @@ async def test_list_backups_async_pages():
RuntimeError,
)
pages = []
- async for page_ in (
+    # Work around an issue in Python 3.9 related to code coverage by adding `# pragma: no branch`
+ # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+ async for page_ in ( # pragma: no branch
await client.list_backups(request={})
- ).pages: # pragma: no branch
+ ).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@@ -6211,6 +6225,262 @@ async def test_restore_table_field_headers_async():
) in kw["metadata"]
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.CopyBackupRequest,
+ dict,
+ ],
+)
+def test_copy_backup(request_type, transport: str = "grpc"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.copy_backup(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.CopyBackupRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_copy_backup_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ client.copy_backup()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.CopyBackupRequest()
+
+
+@pytest.mark.asyncio
+async def test_copy_backup_async(
+ transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest
+):
+ client = BigtableTableAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.copy_backup(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_table_admin.CopyBackupRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_copy_backup_async_from_dict():
+ await test_copy_backup_async(request_type=dict)
+
+
+def test_copy_backup_field_headers():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.CopyBackupRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.copy_backup(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_copy_backup_field_headers_async():
+ client = BigtableTableAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_table_admin.CopyBackupRequest()
+
+ request.parent = "parent_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.copy_backup(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "parent=parent_value",
+ ) in kw["metadata"]
+
+
+def test_copy_backup_flattened():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.copy_backup(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].backup_id
+ mock_val = "backup_id_value"
+ assert arg == mock_val
+ arg = args[0].source_backup
+ mock_val = "source_backup_value"
+ assert arg == mock_val
+ assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp(
+ seconds=751
+ )
+
+
+def test_copy_backup_flattened_error():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.copy_backup(
+ bigtable_table_admin.CopyBackupRequest(),
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
+ )
+
+
+@pytest.mark.asyncio
+async def test_copy_backup_flattened_async():
+ client = BigtableTableAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.copy_backup(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].backup_id
+ mock_val = "backup_id_value"
+ assert arg == mock_val
+ arg = args[0].source_backup
+ mock_val = "source_backup_value"
+ assert arg == mock_val
+ assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp(
+ seconds=751
+ )
+
+
+@pytest.mark.asyncio
+async def test_copy_backup_flattened_error_async():
+ client = BigtableTableAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.copy_backup(
+ bigtable_table_admin.CopyBackupRequest(),
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
+ )
+
+
@pytest.mark.parametrize(
"request_type",
[
@@ -7015,8 +7285,9 @@ def test_create_table_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = gba_table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
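+    # (json_format.MessageToJson accepts protobuf messages, not proto-plus
+    # wrappers, hence the .pb() conversion)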
+ return_value = gba_table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7098,8 +7369,9 @@ def test_create_table_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = gba_table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = gba_table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7233,8 +7505,9 @@ def test_create_table_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = gba_table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = gba_table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7586,8 +7859,9 @@ def test_list_tables_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7670,8 +7944,9 @@ def test_list_tables_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7805,8 +8080,9 @@ def test_list_tables_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -7929,8 +8205,9 @@ def test_get_table_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8009,8 +8286,9 @@ def test_get_table_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8133,8 +8411,9 @@ def test_get_table_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -8200,11 +8479,79 @@ def test_update_table_rest(request_type):
"start_time": {"seconds": 751, "nanos": 543},
"end_time": {},
"source_table": "source_table_value",
+ "source_backup": "source_backup_value",
},
},
"change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}},
"deletion_protection": True,
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+    # Delete any fields which are not present in the current runtime dependency.
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
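+            # proto-plus message classes do not expose DESCRIPTOR; its absence
+            # marks the field type as proto-plus.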
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
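+    # e.g. [("restore_info", "source_type"), ("restore_info", "backup_info"), ...]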
+
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["table"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+                for i in range(len(request_init["table"][field])):
+ del request_init["table"][field][i][subfield]
+ else:
+ del request_init["table"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -8386,23 +8733,6 @@ def test_update_table_rest_bad_request(
request_init = {
"table": {"name": "projects/sample1/instances/sample2/tables/sample3"}
}
- request_init["table"] = {
- "name": "projects/sample1/instances/sample2/tables/sample3",
- "cluster_states": {},
- "column_families": {},
- "granularity": 1,
- "restore_info": {
- "source_type": 1,
- "backup_info": {
- "backup": "backup_value",
- "start_time": {"seconds": 751, "nanos": 543},
- "end_time": {},
- "source_table": "source_table_value",
- },
- },
- "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}},
- "deletion_protection": True,
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -9027,8 +9357,9 @@ def test_modify_column_families_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9106,8 +9437,9 @@ def test_modify_column_families_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9244,8 +9576,9 @@ def test_modify_column_families_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Table.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Table.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9514,10 +9847,11 @@ def test_generate_consistency_token_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9593,10 +9927,11 @@ def test_generate_consistency_token_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9724,10 +10059,11 @@ def test_generate_consistency_token_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9792,8 +10128,9 @@ def test_check_consistency_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -9873,10 +10210,11 @@ def test_check_consistency_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.CheckConsistencyResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10012,8 +10350,9 @@ def test_check_consistency_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10371,8 +10710,9 @@ def test_get_snapshot_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Snapshot.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Snapshot.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10450,8 +10790,9 @@ def test_get_snapshot_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Snapshot.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Snapshot.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10578,8 +10919,9 @@ def test_get_snapshot_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Snapshot.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Snapshot.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10644,8 +10986,9 @@ def test_list_snapshots_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10727,10 +11070,9 @@ def test_list_snapshots_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(
- return_value
- )
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -10865,8 +11207,9 @@ def test_list_snapshots_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11240,6 +11583,7 @@ def test_create_backup_rest(request_type):
request_init["backup"] = {
"name": "name_value",
"source_table": "source_table_value",
+ "source_backup": "source_backup_value",
"expire_time": {"seconds": 751, "nanos": 543},
"start_time": {},
"end_time": {},
@@ -11260,10 +11604,77 @@ def test_create_backup_rest(request_type):
"kms_key_version": "kms_key_version_value",
},
}
- request = request_type(**request_init)
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+    # Delete any fields which are not present in the current runtime dependency.
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
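+    # (same runtime-field pruning as in test_update_table_rest above)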
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
- # Mock the http request call within the method and fake a response.
- with mock.patch.object(type(client.transport._session), "request") as req:
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["backup"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+                for i in range(len(request_init["backup"][field])):
+ del request_init["backup"][field][i][subfield]
+ else:
+ del request_init["backup"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = operations_pb2.Operation(name="operations/spam")
@@ -11458,29 +11869,6 @@ def test_create_backup_rest_bad_request(
# send a request that will satisfy transcoding
request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
- request_init["backup"] = {
- "name": "name_value",
- "source_table": "source_table_value",
- "expire_time": {"seconds": 751, "nanos": 543},
- "start_time": {},
- "end_time": {},
- "size_bytes": 1089,
- "state": 1,
- "encryption_info": {
- "encryption_type": 1,
- "encryption_status": {
- "code": 411,
- "message": "message_value",
- "details": [
- {
- "type_url": "type.googleapis.com/google.protobuf.Duration",
- "value": b"\x08\x0c\x10\xdb\x07",
- }
- ],
- },
- "kms_key_version": "kms_key_version_value",
- },
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -11587,6 +11975,7 @@ def test_get_backup_rest(request_type):
return_value = table.Backup(
name="name_value",
source_table="source_table_value",
+ source_backup="source_backup_value",
size_bytes=1089,
state=table.Backup.State.CREATING,
)
@@ -11594,8 +11983,9 @@ def test_get_backup_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11605,6 +11995,7 @@ def test_get_backup_rest(request_type):
assert isinstance(response, table.Backup)
assert response.name == "name_value"
assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
assert response.size_bytes == 1089
assert response.state == table.Backup.State.CREATING
@@ -11673,8 +12064,9 @@ def test_get_backup_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11801,8 +12193,9 @@ def test_get_backup_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11862,6 +12255,7 @@ def test_update_backup_rest(request_type):
request_init["backup"] = {
"name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4",
"source_table": "source_table_value",
+ "source_backup": "source_backup_value",
"expire_time": {"seconds": 751, "nanos": 543},
"start_time": {},
"end_time": {},
@@ -11882,6 +12276,73 @@ def test_update_backup_rest(request_type):
"kms_key_version": "kms_key_version_value",
},
}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+    # Delete any fields which are not present in the current runtime dependency.
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
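+    # (same runtime-field pruning as in test_update_table_rest above)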
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+    # For each item in the sample request, create a list of subfields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["backup"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {
+ "field": field,
+ "subfield": subfield,
+ "is_repeated": is_repeated,
+ }
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+                for i in range(len(request_init["backup"][field])):
+ del request_init["backup"][field][i][subfield]
+ else:
+ del request_init["backup"][field][subfield]
request = request_type(**request_init)
# Mock the http request call within the method and fake a response.
@@ -11890,6 +12351,7 @@ def test_update_backup_rest(request_type):
return_value = table.Backup(
name="name_value",
source_table="source_table_value",
+ source_backup="source_backup_value",
size_bytes=1089,
state=table.Backup.State.CREATING,
)
@@ -11897,8 +12359,9 @@ def test_update_backup_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -11908,6 +12371,7 @@ def test_update_backup_rest(request_type):
assert isinstance(response, table.Backup)
assert response.name == "name_value"
assert response.source_table == "source_table_value"
+ assert response.source_backup == "source_backup_value"
assert response.size_bytes == 1089
assert response.state == table.Backup.State.CREATING
@@ -11974,8 +12438,9 @@ def test_update_backup_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -12073,29 +12538,6 @@ def test_update_backup_rest_bad_request(
"name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4"
}
}
- request_init["backup"] = {
- "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4",
- "source_table": "source_table_value",
- "expire_time": {"seconds": 751, "nanos": 543},
- "start_time": {},
- "end_time": {},
- "size_bytes": 1089,
- "state": 1,
- "encryption_info": {
- "encryption_type": 1,
- "encryption_status": {
- "code": 411,
- "message": "message_value",
- "details": [
- {
- "type_url": "type.googleapis.com/google.protobuf.Duration",
- "value": b"\x08\x0c\x10\xdb\x07",
- }
- ],
- },
- "kms_key_version": "kms_key_version_value",
- },
- }
request = request_type(**request_init)
# Mock the http request call within the method and fake a BadRequest error.
@@ -12138,8 +12580,9 @@ def test_update_backup_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = table.Backup.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = table.Backup.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -12464,8 +12907,9 @@ def test_list_backups_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -12549,8 +12993,9 @@ def test_list_backups_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -12687,8 +13132,9 @@ def test_list_backups_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13006,6 +13452,296 @@ def test_restore_table_rest_error():
)
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_table_admin.CopyBackupRequest,
+ dict,
+ ],
+)
+def test_copy_backup_rest(request_type):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+ response = client.copy_backup(request)
+
+ # Establish that the response is the type that we expect.
+ assert response.operation.name == "operations/spam"
+
+
+def test_copy_backup_rest_required_fields(
+ request_type=bigtable_table_admin.CopyBackupRequest,
+):
+ transport_class = transports.BigtableTableAdminRestTransport
+
+ request_init = {}
+ request_init["parent"] = ""
+ request_init["backup_id"] = ""
+ request_init["source_backup"] = ""
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(
+ json_format.MessageToJson(
+ pb_request,
+ including_default_value_fields=False,
+ use_integers_for_enums=False,
+ )
+ )
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).copy_backup._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
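+    # (keys are lowerCamelCase because json_format.MessageToJson emits JSON
+    # field names by default)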
+ jsonified_request["parent"] = "parent_value"
+ jsonified_request["backupId"] = "backup_id_value"
+ jsonified_request["sourceBackup"] = "source_backup_value"
+
+ unset_fields = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ ).copy_backup._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+ assert "parent" in jsonified_request
+ assert jsonified_request["parent"] == "parent_value"
+ assert "backupId" in jsonified_request
+ assert jsonified_request["backupId"] == "backup_id_value"
+ assert "sourceBackup" in jsonified_request
+ assert jsonified_request["sourceBackup"] == "source_backup_value"
+
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, "request") as req:
+        # We need to mock transcode() because providing default values
+        # for required fields would make the real transcoding fail when the
+        # http_options expect actual values for those fields.
+ with mock.patch.object(path_template, "transcode") as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ "uri": "v1/sample_method",
+ "method": "post",
+ "query_params": pb_request,
+ }
+ transcode_result["body"] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+
+ response = client.copy_backup(request)
+
+ expected_params = [("$alt", "json;enum-encoding=int")]
+ actual_params = req.call_args.kwargs["params"]
+ assert expected_params == actual_params
+
+
+def test_copy_backup_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+ )
+
+ unset_fields = transport.copy_backup._get_unset_required_fields({})
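+    # The intersection with set(()) below evaluates to the empty set, i.e. no
+    # unset required fields are expected here.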
+ assert set(unset_fields) == (
+ set(())
+ & set(
+ (
+ "parent",
+ "backupId",
+ "sourceBackup",
+ "expireTime",
+ )
+ )
+ )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_copy_backup_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None
+ if null_interceptor
+ else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
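+    # Patch the HTTP session, the transcoder, and the pre/post interceptor
+    # hooks, then verify each hook fires exactly once around the call.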
+ with mock.patch.object(
+ type(client.transport._session), "request"
+ ) as req, mock.patch.object(
+ path_template, "transcode"
+ ) as transcode, mock.patch.object(
+ operation.Operation, "_set_result_from_operation"
+ ), mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "post_copy_backup"
+ ) as post, mock.patch.object(
+ transports.BigtableTableAdminRestInterceptor, "pre_copy_backup"
+ ) as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_table_admin.CopyBackupRequest.pb(
+ bigtable_table_admin.CopyBackupRequest()
+ )
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = json_format.MessageToJson(
+ operations_pb2.Operation()
+ )
+
+ request = bigtable_table_admin.CopyBackupRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+        client.copy_backup(
+            request,
+            metadata=metadata,
+        )
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_copy_backup_rest_bad_request(
+ transport: str = "rest", request_type=bigtable_table_admin.CopyBackupRequest
+):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, "request") as req, pytest.raises(
+ core_exceptions.BadRequest
+ ):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.copy_backup(request)
+
+
+def test_copy_backup_rest_flattened():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam")
+
+ # get arguments that satisfy an http rule for this method
+ sample_request = {
+ "parent": "projects/sample1/instances/sample2/clusters/sample3"
+ }
+
+ # get truthy value for each flattened field
+ mock_args = dict(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
+ )
+ mock_args.update(sample_request)
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+ response_value._content = json_return_value.encode("UTF-8")
+ req.return_value = response_value
+
+ client.copy_backup(**mock_args)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(req.mock_calls) == 1
+ _, args, _ = req.mock_calls[0]
+ assert path_template.validate(
+ "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy"
+ % client.transport._host,
+ args[1],
+ )
+
+
+def test_copy_backup_rest_flattened_error(transport: str = "rest"):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.copy_backup(
+ bigtable_table_admin.CopyBackupRequest(),
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ expire_time=timestamp_pb2.Timestamp(seconds=751),
+ )
+
+
+def test_copy_backup_rest_error():
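+    # Construction-only smoke test: creating the client over the REST
+    # transport should not raise.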
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+ )
+
+
@pytest.mark.parametrize(
"request_type",
[
@@ -13034,8 +13770,7 @@ def test_get_iam_policy_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
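+        # (IAM policy responses are plain protobuf messages; no proto-plus
+        # unwrapping is needed)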
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13112,8 +13847,7 @@ def test_get_iam_policy_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13236,8 +13970,7 @@ def test_get_iam_policy_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13303,8 +14036,7 @@ def test_set_iam_policy_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13381,8 +14113,7 @@ def test_set_iam_policy_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13513,8 +14244,7 @@ def test_set_iam_policy_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13579,8 +14309,7 @@ def test_test_iam_permissions_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13660,8 +14389,7 @@ def test_test_iam_permissions_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13795,8 +14523,7 @@ def test_test_iam_permissions_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = return_value
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -13995,6 +14722,7 @@ def test_bigtable_table_admin_base_transport():
"delete_backup",
"list_backups",
"restore_table",
+ "copy_backup",
"get_iam_policy",
"set_iam_policy",
"test_iam_permissions",
@@ -14371,6 +15099,9 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name)
session1 = client1.transport.restore_table._session
session2 = client2.transport.restore_table._session
assert session1 != session2
+ session1 = client1.transport.copy_backup._session
+ session2 = client2.transport.copy_backup._session
+ assert session1 != session2
session1 = client1.transport.get_iam_policy._session
session2 = client2.transport.get_iam_policy._session
assert session1 != session2
diff --git a/tests/unit/gapic/bigtable_v2/__init__.py b/tests/unit/gapic/bigtable_v2/__init__.py
index e8e1c3845..89a37dc92 100644
--- a/tests/unit/gapic/bigtable_v2/__init__.py
+++ b/tests/unit/gapic/bigtable_v2/__init__.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py
index 03ba3044f..2319306d7 100644
--- a/tests/unit/gapic/bigtable_v2/test_bigtable.py
+++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2022 Google LLC
+# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -3004,8 +3004,9 @@ def test_read_rows_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadRowsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadRowsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
@@ -3086,8 +3087,9 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest):
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadRowsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadRowsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
@@ -3215,8 +3217,9 @@ def test_read_rows_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadRowsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadRowsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -3286,8 +3289,9 @@ def test_sample_row_keys_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.SampleRowKeysResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
@@ -3372,8 +3376,9 @@ def test_sample_row_keys_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.SampleRowKeysResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
@@ -3501,8 +3506,9 @@ def test_sample_row_keys_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.SampleRowKeysResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -3569,8 +3575,9 @@ def test_mutate_row_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.MutateRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.MutateRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -3647,8 +3654,9 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest)
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.MutateRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.MutateRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -3787,8 +3795,9 @@ def test_mutate_row_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.MutateRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.MutateRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -3858,8 +3867,9 @@ def test_mutate_rows_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.MutateRowsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.MutateRowsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
@@ -3939,8 +3949,9 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.MutateRowsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.MutateRowsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
@@ -4077,8 +4088,9 @@ def test_mutate_rows_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.MutateRowsResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.MutateRowsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4148,8 +4160,9 @@ def test_check_and_mutate_row_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4229,8 +4242,9 @@ def test_check_and_mutate_row_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4386,8 +4400,9 @@ def test_check_and_mutate_row_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4473,8 +4488,9 @@ def test_ping_and_warm_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.PingAndWarmResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.PingAndWarmResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4547,8 +4563,9 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.PingAndWarmResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.PingAndWarmResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4670,8 +4687,9 @@ def test_ping_and_warm_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.PingAndWarmResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.PingAndWarmResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4733,8 +4751,9 @@ def test_read_modify_write_row_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4813,8 +4832,9 @@ def test_read_modify_write_row_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -4951,8 +4971,9 @@ def test_read_modify_write_row_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -5018,10 +5039,11 @@ def test_generate_initial_change_stream_partitions_rest(request_type):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
@@ -5107,10 +5129,11 @@ def test_generate_initial_change_stream_partitions_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
@@ -5249,10 +5272,11 @@ def test_generate_initial_change_stream_partitions_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(
+ # Convert return value to protobuf type
+ return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(
return_value
)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
@@ -5316,17 +5340,14 @@ def test_read_change_stream_rest(request_type):
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
- return_value = bigtable.ReadChangeStreamResponse(
- data_change=bigtable.ReadChangeStreamResponse.DataChange(
- type_=bigtable.ReadChangeStreamResponse.DataChange.Type.USER
- ),
- )
+ return_value = bigtable.ReadChangeStreamResponse()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
@@ -5408,8 +5429,9 @@ def test_read_change_stream_rest_required_fields(
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
@@ -5539,8 +5561,9 @@ def test_read_change_stream_rest_flattened():
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
- pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
- json_return_value = json_format.MessageToJson(pb_return_value)
+ # Convert return value to protobuf type
+ return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
json_return_value = "[{}]".format(json_return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
diff --git a/tests/unit/test_batcher.py b/tests/unit/test_batcher.py
new file mode 100644
index 000000000..741d9f282
--- /dev/null
+++ b/tests/unit/test_batcher.py
@@ -0,0 +1,269 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import time
+
+import mock
+import pytest
+
+from google.cloud.bigtable.row import DirectRow
+from google.cloud.bigtable.batcher import (
+ _FlowControl,
+ MutationsBatcher,
+ MutationsBatchError,
+)
+
+TABLE_ID = "table-id"
+TABLE_NAME = "/tables/" + TABLE_ID
+
+
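+# The tests below drive MutationsBatcher against the _Table stub defined at
+# the bottom of this module; it records mutate_rows calls instead of issuing
+# real RPCs.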
+def test_mutation_batcher_constructor():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(table) as mutation_batcher:
+ assert table is mutation_batcher.table
+
+
+def test_mutation_batcher_w_user_callback():
+ table = _Table(TABLE_NAME)
+
+ def callback_fn(response):
+ callback_fn.count = len(response)
+
+ with MutationsBatcher(
+ table, flush_count=1, batch_completed_callback=callback_fn
+ ) as mutation_batcher:
+ rows = [
+ DirectRow(row_key=b"row_key"),
+ DirectRow(row_key=b"row_key_2"),
+ DirectRow(row_key=b"row_key_3"),
+ DirectRow(row_key=b"row_key_4"),
+ ]
+
+ mutation_batcher.mutate_rows(rows)
+
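+ # By the time the context manager exits, the callback has observed the
+ # statuses for all four rows.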
+ assert callback_fn.count == 4
+
+
+def test_mutation_batcher_mutate_row():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(table=table) as mutation_batcher:
+ rows = [
+ DirectRow(row_key=b"row_key"),
+ DirectRow(row_key=b"row_key_2"),
+ DirectRow(row_key=b"row_key_3"),
+ DirectRow(row_key=b"row_key_4"),
+ ]
+
+ mutation_batcher.mutate_rows(rows)
+
+ assert table.mutation_calls == 1
+
+
+def test_mutation_batcher_mutate():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(table=table) as mutation_batcher:
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", 1)
+ row.set_cell("cf1", b"c2", 2)
+ row.set_cell("cf1", b"c3", 3)
+ row.set_cell("cf1", b"c4", 4)
+
+ mutation_batcher.mutate(row)
+
+ assert table.mutation_calls == 1
+
+
+def test_mutation_batcher_flush_w_no_rows():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(table=table) as mutation_batcher:
+ mutation_batcher.flush()
+
+ assert table.mutation_calls == 0
+
+
+def test_mutation_batcher_mutate_w_max_flush_count():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(table=table, flush_count=3) as mutation_batcher:
+ row_1 = DirectRow(row_key=b"row_key_1")
+ row_2 = DirectRow(row_key=b"row_key_2")
+ row_3 = DirectRow(row_key=b"row_key_3")
+
+ mutation_batcher.mutate(row_1)
+ mutation_batcher.mutate(row_2)
+ mutation_batcher.mutate(row_3)
+
+ assert table.mutation_calls == 1
+
+
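+# Patch the flow-control ceiling down so a single row with three cell
+# mutations is enough to reach it.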
+@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3)
+def test_mutation_batcher_mutate_w_max_mutations():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(table=table) as mutation_batcher:
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", 1)
+ row.set_cell("cf1", b"c2", 2)
+ row.set_cell("cf1", b"c3", 3)
+
+ mutation_batcher.mutate(row)
+
+ assert table.mutation_calls == 1
+
+
+def test_mutation_batcher_mutate_w_max_row_bytes():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(
+ table=table, max_row_bytes=3 * 1024 * 1024
+ ) as mutation_batcher:
+ number_of_bytes = 1 * 1024 * 1024
+ max_value = b"1" * number_of_bytes
+
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", max_value)
+ row.set_cell("cf1", b"c2", max_value)
+ row.set_cell("cf1", b"c3", max_value)
+
+ mutation_batcher.mutate(row)
+
+ assert table.mutation_calls == 1
+
+
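+# Rows below the size and count thresholds stay buffered; close() must flush
+# whatever is still pending.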
+def test_mutations_batcher_flushed_when_closed():
+ table = _Table(TABLE_NAME)
+ mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024)
+
+ number_of_bytes = 1 * 1024 * 1024
+ max_value = b"1" * number_of_bytes
+
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", max_value)
+ row.set_cell("cf1", b"c2", max_value)
+
+ mutation_batcher.mutate(row)
+ assert table.mutation_calls == 0
+
+ mutation_batcher.close()
+
+ assert table.mutation_calls == 1
+
+
+def test_mutations_batcher_context_manager_flushed_when_closed():
+ table = _Table(TABLE_NAME)
+ with MutationsBatcher(
+ table=table, max_row_bytes=3 * 1024 * 1024
+ ) as mutation_batcher:
+ number_of_bytes = 1 * 1024 * 1024
+ max_value = b"1" * number_of_bytes
+
+ row = DirectRow(row_key=b"row_key")
+ row.set_cell("cf1", b"c1", max_value)
+ row.set_cell("cf1", b"c2", max_value)
+
+ mutation_batcher.mutate(row)
+
+ assert table.mutation_calls == 1
+
+
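+# The batcher schedules a background timer that invokes flush() once
+# flush_interval elapses.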
+@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush")
+def test_mutations_batcher_flush_interval(mocked_flush):
+ table = _Table(TABLE_NAME)
+ flush_interval = 0.5
+ mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval)
+
+ assert mutation_batcher._timer.interval == flush_interval
+ mocked_flush.assert_not_called()
+
+ time.sleep(0.4)
+ mocked_flush.assert_not_called()
+
+ time.sleep(0.1)
+ mocked_flush.assert_called_once_with()
+
+ mutation_batcher.close()
+
+
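+# Non-OK per-row statuses returned by the table should surface as a
+# MutationsBatchError when the batcher is closed.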
+def test_mutations_batcher_response_with_error_codes():
+ from google.rpc.status_pb2 import Status
+
+ mocked_response = [Status(code=1), Status(code=5)]
+
+ with mock.patch("tests.unit.test_batcher._Table") as mocked_table:
+ table = mocked_table.return_value
+ mutation_batcher = MutationsBatcher(table=table)
+
+ row1 = DirectRow(row_key=b"row_key")
+ row2 = DirectRow(row_key=b"row_key")
+ table.mutate_rows.return_value = mocked_response
+
+ mutation_batcher.mutate_rows([row1, row2])
+ with pytest.raises(MutationsBatchError) as exc:
+ mutation_batcher.close()
+ assert exc.value.message == "Errors in batch mutations."
+ assert len(exc.value.exc) == 2
+
+ assert exc.value.exc[0].message == mocked_response[0].message
+ assert exc.value.exc[1].message == mocked_response[1].message
+
+
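+# _FlowControl exposes an event that stays set while more mutations may be
+# sent and is cleared once the inflight count or byte size reaches its limit.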
+def test_flow_control_event_is_set_when_not_blocked():
+ flow_control = _FlowControl()
+
+ flow_control.set_flow_control_status()
+ assert flow_control.event.is_set()
+
+
+def test_flow_control_event_is_not_set_when_blocked():
+ flow_control = _FlowControl()
+
+ flow_control.inflight_mutations = flow_control.max_mutations
+ flow_control.inflight_size = flow_control.max_mutation_bytes
+
+ flow_control.set_flow_control_status()
+ assert not flow_control.event.is_set()
+
+
+@mock.patch("concurrent.futures.ThreadPoolExecutor.submit")
+def test_flush_async_batch_count(mocked_executor_submit):
+ table = _Table(TABLE_NAME)
+ mutation_batcher = MutationsBatcher(table=table, flush_count=2)
+
+ number_of_bytes = 1 * 1024 * 1024
+ max_value = b"1" * number_of_bytes
+ for index in range(5):
+ row = DirectRow(row_key=f"row_key_{index}")
+ row.set_cell("cf1", b"c1", max_value)
+ mutation_batcher.mutate(row)
+ mutation_batcher._flush_async()
+
+ # Three batches are submitted: two batches of two rows each, and a final batch with the remaining row.
+ assert mocked_executor_submit.call_count == 3
+
+
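+# Minimal stand-ins for the client objects the batcher interacts with.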
+class _Instance(object):
+ def __init__(self, client=None):
+ self._client = client
+
+
+class _Table(object):
+ def __init__(self, name, client=None):
+ self.name = name
+ self._instance = _Instance(client)
+ self.mutation_calls = 0
+
+ def mutate_rows(self, rows):
+ from google.rpc.status_pb2 import Status
+
+ self.mutation_calls += 1
+
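+ # One OK status per row mimics a fully successful mutate_rows RPC.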
+ return [Status(code=0) for _ in rows]
diff --git a/tests/unit/test_packaging.py b/tests/unit/test_packaging.py
new file mode 100644
index 000000000..93fa4d1c3
--- /dev/null
+++ b/tests/unit/test_packaging.py
@@ -0,0 +1,37 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+
+
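+# Each check runs in a subprocess so namespace-package resolution starts from
+# a clean interpreter with PYTHONPATH pointing at tmp_path.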
+def test_namespace_package_compat(tmp_path):
+ # The ``google`` namespace package should not be masked
+ # by the presence of ``google-cloud-bigtable``.
+ google = tmp_path / "google"
+ google.mkdir()
+ google.joinpath("othermod.py").write_text("")
+ env = dict(os.environ, PYTHONPATH=str(tmp_path))
+ cmd = [sys.executable, "-m", "google.othermod"]
+ subprocess.check_call(cmd, env=env)
+
+ # The ``google.cloud`` namespace package should not be masked
+ # by the presence of ``google-cloud-bigtable``.
+ google_cloud = tmp_path / "google" / "cloud"
+ google_cloud.mkdir()
+ google_cloud.joinpath("othermod.py").write_text("")
+ env = dict(os.environ, PYTHONPATH=str(tmp_path))
+ cmd = [sys.executable, "-m", "google.cloud.othermod"]
+ subprocess.check_call(cmd, env=env)
diff --git a/tests/unit/v2_client/test_cluster.py b/tests/unit/v2_client/test_cluster.py
index cb0312b0c..65ed47437 100644
--- a/tests/unit/v2_client/test_cluster.py
+++ b/tests/unit/v2_client/test_cluster.py
@@ -752,7 +752,6 @@ def test_cluster_update_w_partial_autoscaling_config():
},
]
for config in cluster_config:
-
cluster = _make_cluster(
CLUSTER_ID,
instance,
@@ -927,7 +926,6 @@ def test_cluster_disable_autoscaling():
def test_create_cluster_with_both_manual_and_autoscaling():
-
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.enums import StorageType
@@ -955,7 +953,6 @@ def test_create_cluster_with_both_manual_and_autoscaling():
def test_create_cluster_with_partial_autoscaling_config():
-
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.enums import StorageType
@@ -996,7 +993,6 @@ def test_create_cluster_with_partial_autoscaling_config():
def test_create_cluster_with_no_scaling_config():
-
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.enums import StorageType
diff --git a/tests/unit/v2_client/test_column_family.py b/tests/unit/v2_client/test_column_family.py
index b164b2fc1..e4f74e264 100644
--- a/tests/unit/v2_client/test_column_family.py
+++ b/tests/unit/v2_client/test_column_family.py
@@ -595,7 +595,6 @@ def test__gc_rule_from_pb_unknown_field_name():
from google.cloud.bigtable.column_family import _gc_rule_from_pb
class MockProto(object):
-
names = []
_pb = {}
diff --git a/tests/unit/v2_client/test_instance.py b/tests/unit/v2_client/test_instance.py
index c577adca5..797e4bd9c 100644
--- a/tests/unit/v2_client/test_instance.py
+++ b/tests/unit/v2_client/test_instance.py
@@ -67,7 +67,6 @@ def _make_instance(*args, **kwargs):
def test_instance_constructor_defaults():
-
client = object()
instance = _make_instance(INSTANCE_ID, client)
assert instance.instance_id == INSTANCE_ID
diff --git a/tests/unit/v2_client/test_row_data.py b/tests/unit/v2_client/test_row_data.py
index fba69ceba..9f2c40a54 100644
--- a/tests/unit/v2_client/test_row_data.py
+++ b/tests/unit/v2_client/test_row_data.py
@@ -1118,7 +1118,6 @@ def test_RRRM_build_updated_request_row_ranges_valid():
class _MockCancellableIterator(object):
-
cancel_calls = 0
def __init__(self, *values):
@@ -1199,5 +1198,4 @@ def _read_rows_retry_exception(exc):
class _Client(object):
-
data_stub = None
diff --git a/tests/unit/v2_client/test_table.py b/tests/unit/v2_client/test_table.py
index 3d7d2e8ee..032363bd7 100644
--- a/tests/unit/v2_client/test_table.py
+++ b/tests/unit/v2_client/test_table.py
@@ -1067,8 +1067,8 @@ def test_table_yield_retry_rows():
for row in table.yield_rows(start_key=ROW_KEY_1, end_key=ROW_KEY_2):
rows.append(row)
- assert len(warned) == 1
- assert warned[0].category is DeprecationWarning
+ assert len(warned) >= 1
+ assert DeprecationWarning in [w.category for w in warned]
result = rows[1]
assert result.row_key == ROW_KEY_2
@@ -1140,8 +1140,8 @@ def test_table_yield_rows_with_row_set():
for row in table.yield_rows(row_set=row_set):
rows.append(row)
- assert len(warned) == 1
- assert warned[0].category is DeprecationWarning
+ assert len(warned) >= 1
+ assert DeprecationWarning in [w.category for w in warned]
assert rows[0].row_key == ROW_KEY_1
assert rows[1].row_key == ROW_KEY_2
@@ -1689,7 +1689,6 @@ def _do_mutate_retryable_rows_helper(
expected_entries = []
for row, prior_status in zip(rows, worker.responses_statuses):
-
if prior_status is None or prior_status.code in RETRYABLES:
mutations = row._get_mutations().copy() # row clears on success
entry = data_messages_v2_pb2.MutateRowsRequest.Entry(